+trigger:
+ paths:
+ exclude:
+ - doc
+ - README.md
+ - CHANGES.md
+ - CONTRIBUTING.md
+
+pr:
+ paths:
+ exclude:
+ - doc
+ - README.md
+ - CHANGES.md
+ - CONTRIBUTING.md
+
resources:
containers:
+ - container: winx86
+ image: pointcloudlibrary/env:winx86
+ - container: winx64
+ image: pointcloudlibrary/env:winx64
- container: fmt
image: pointcloudlibrary/fmt
- - container: env1604
- image: pointcloudlibrary/env:16.04
- container: env1804
image: pointcloudlibrary/env:18.04
- container: env2004
image: pointcloudlibrary/env:20.04
- - container: doc
- image: pointcloudlibrary/doc
+ - container: env2010
+ image: pointcloudlibrary/env:20.10
stages:
- stage: formatting
- job: ubuntu
displayName: Ubuntu
pool:
- vmImage: 'Ubuntu 16.04'
+ vmImage: 'Ubuntu 20.04'
strategy:
matrix:
- 16.04 GCC:
- CONTAINER: env1604
+ 18.04 GCC: # oldest LTS
+ CONTAINER: env1804
CC: gcc
CXX: g++
- BUILD_GPU: OFF
+ BUILD_GPU: ON
CMAKE_ARGS: '-DPCL_WARNINGS_ARE_ERRORS=ON'
- 20.04 GCC:
- CONTAINER: env2004
+ 20.10 GCC: # latest Ubuntu
+ CONTAINER: env2010
CC: gcc
CXX: g++
BUILD_GPU: OFF
- CMAKE_ARGS: ''
container: $[ variables['CONTAINER'] ]
timeoutInMinutes: 0
variables:
BUILD_DIR: '$(Agent.BuildDirectory)/build'
- CMAKE_CXX_FLAGS: '-Wall -Wextra'
+ CMAKE_CXX_FLAGS: '-Wall -Wextra -Wnoexcept-type'
+ DISPLAY: :99.0 # Checked for in CMake
steps:
- template: build/ubuntu.yaml
# Placement of the Ubuntu Clang job after macOS ensures that an extra parallel job doesn't need to be created.
# Total time per run remains the same: macOS is quicker, so it finishes earlier and the remaining time is used by this job.
# Therefore, the number of parallel jobs and the total run time of the entire pipeline remain unchanged even after adding this job.
+ # The Ubuntu version is chosen to cover versions not already covered by the GCC-based CI.
dependsOn: osx
condition: succeededOrFailed()
pool:
- vmImage: 'Ubuntu 16.04'
+ vmImage: 'Ubuntu 20.04'
strategy:
matrix:
- 18.04 Clang:
- CONTAINER: env1804
+ 20.04 Clang:
+ CONTAINER: env2004
CC: clang
CXX: clang++
BUILD_GPU: ON
variables:
BUILD_DIR: '$(Agent.BuildDirectory)/build'
CMAKE_CXX_FLAGS: '-Wall -Wextra'
+ DISPLAY: :99.0 # Checked for in CMake
steps:
- template: build/ubuntu.yaml
- job: ubuntu_indices
dependsOn: osx
condition: succeededOrFailed()
pool:
- vmImage: 'Ubuntu 16.04'
+ vmImage: 'Ubuntu 20.04'
strategy:
matrix:
- 18.04 Clang:
- CONTAINER: env1804
+ 20.04 Clang:
+ CONTAINER: env2004
CC: clang
CXX: clang++
INDEX_SIGNED: OFF
displayName: Build MSVC
dependsOn: formatting
jobs:
- - job: vs2017
- displayName: Windows VS2017 Build
+ - job: Windows
+ displayName: Windows Build
pool:
- vmImage: 'vs2017-win2016'
+ vmImage: 'windows-2019'
strategy:
matrix:
x86:
+ CONTAINER: winx86
PLATFORM: 'x86'
ARCHITECTURE: 'x86'
- GENERATOR: 'Visual Studio 15 2017'
+ GENERATOR: '"Visual Studio 16 2019" -A Win32'
x64:
+ CONTAINER: winx64
PLATFORM: 'x64'
ARCHITECTURE: 'x86_amd64'
- GENERATOR: 'Visual Studio 15 2017 Win64'
+ GENERATOR: '"Visual Studio 16 2019" -A x64'
+ container: $[ variables['CONTAINER'] ]
timeoutInMinutes: 0
variables:
- BUILD_DIR: '$(Agent.WorkFolder)\build'
+ BUILD_DIR: 'c:\build'
CONFIGURATION: 'Release'
- VCPKG_ROOT: 'C:\vcpkg'
+ VCPKG_ROOT: 'c:\vcpkg'
steps:
- template: build/windows.yaml
-
- - stage: documentation
- displayName: Documentation
- dependsOn: []
- jobs:
- - template: documentation.yaml
-
- - stage: tutorials
- displayName: Tutorials
- dependsOn: build_gcc
- jobs:
- - template: tutorials.yaml
-
# find the commit hash on a quick non-forced update too
fetchDepth: 10
- script: |
- brew install cmake pkg-config boost eigen flann glew libusb qhull vtk glew qt5 libpcap libomp brewsci/science/openni
+ brew install cmake pkg-config boost eigen flann glew libusb qhull vtk glew qt5 libpcap libomp google-benchmark
+ brew install brewsci/science/openni
git clone https://github.com/abseil/googletest.git $GOOGLE_TEST_DIR # the official endpoint changed to abseil/googletest
cd $GOOGLE_TEST_DIR && git checkout release-1.8.1
displayName: 'Install Dependencies'
-DPCL_ONLY_CORE_POINT_TYPES=ON \
-DBUILD_simulation=ON \
-DBUILD_global_tests=ON \
+ -DBUILD_benchmarks=ON \
-DBUILD_examples=ON \
-DBUILD_tools=ON \
-DBUILD_apps=ON \
steps:
+ - script: (nohup Xvfb :99 -screen 0 1280x1024x24 -nolisten tcp -nolisten unix &)
+ displayName: "Start Xvfb for DISPLAY=$(DISPLAY)"
- checkout: self
# find the commit hash on a quick non-forced update too
fetchDepth: 10
-DBUILD_simulation=ON \
-DBUILD_surface_on_nurbs=ON \
-DBUILD_global_tests=ON \
+ -DBUILD_benchmarks=ON \
-DBUILD_examples=ON \
-DBUILD_tools=ON \
-DBUILD_apps=ON \
-DPCL_ONLY_CORE_POINT_TYPES=ON \
-DPCL_INDEX_SIGNED=$INDEX_SIGNED \
-DPCL_INDEX_SIZE=$INDEX_SIZE \
- -DBUILD_geometry=OFF \
- -DBUILD_tools=OFF \
- -DBUILD_kdtree=OFF \
- -DBUILD_ml=OFF \
- -DBUILD_octree=OFF \
-DBUILD_global_tests=ON
# Temporary fix to ensure no tests are skipped
cmake $(Build.SourcesDirectory)
# find the commit hash on a quick non-forced update too
fetchDepth: 10
- script: |
- echo ##vso[task.setvariable variable=BOOST_ROOT]%BOOST_ROOT_1_69_0%
- displayName: 'Set BOOST_ROOT Environment Variable'
- - script: |
- echo ##vso[task.prependpath]%BOOST_ROOT_1_69_0%\lib
- displayName: 'Include Boost Libraries In System PATH'
- - script: |
- set
- displayName: 'Print Environment Variables'
- - script: |
- vcpkg.exe install eigen3 flann gtest qhull --triplet %PLATFORM%-windows && vcpkg.exe list
- displayName: 'Install C++ Dependencies Via Vcpkg'
- - script: |
- rmdir %VCPKG_ROOT%\downloads /S /Q
- rmdir %VCPKG_ROOT%\packages /S /Q
- displayName: 'Free Up Space'
- - script: |
- mkdir %BUILD_DIR% && cd %BUILD_DIR%
+ mkdir %BUILD_DIR% && cd %BUILD_DIR% && dir
cmake $(Build.SourcesDirectory) ^
- -G"%GENERATOR%" ^
+ -G%GENERATOR% ^
+ -DVCPKG_TARGET_TRIPLET=%PLATFORM%-windows-rel ^
-DCMAKE_BUILD_TYPE="MinSizeRel" ^
- -DCMAKE_TOOLCHAIN_FILE=%VCPKG_ROOT%\scripts\buildsystems\vcpkg.cmake ^
+ -DCMAKE_TOOLCHAIN_FILE="%VCPKG_ROOT%\scripts\buildsystems\vcpkg.cmake" ^
-DVCPKG_APPLOCAL_DEPS=ON ^
-DPCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32=ON ^
-DPCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32=ON ^
-DPCL_BUILD_WITH_QHULL_DYNAMIC_LINKING_WIN32=ON ^
-DBUILD_global_tests=ON ^
+ -DBUILD_benchmarks=ON ^
-DBUILD_tools=OFF ^
- -DBUILD_surface_on_nurbs=ON
+ -DBUILD_surface_on_nurbs=ON ^
+ -DPCL_DISABLE_VISUALIZATION_TESTS=ON
displayName: 'CMake Configuration'
- - script: cd %BUILD_DIR% && cmake --build . --config %CONFIGURATION%
+ - script: |
+ cd %BUILD_DIR% && cmake --build . --config %CONFIGURATION%
displayName: 'Build Library'
- - script: cd %BUILD_DIR% && cmake --build . --target tests --config %CONFIGURATION%
+ - script: |
+ cd %BUILD_DIR% && cmake --build . --target tests --config %CONFIGURATION%
displayName: 'Run Unit Tests'
- task: PublishTestResults@2
inputs:
testResultsFormat: 'CTest'
testResultsFiles: '**/Test*.xml'
- searchFolder: '$(Agent.WorkFolder)\build'
+ searchFolder: '$(BUILD_DIR)'
condition: succeededOrFailed()
--- /dev/null
+trigger:
+ paths:
+ include:
+ - doc
+
+pr:
+ paths:
+ include:
+ - doc
+
+resources:
+ pipelines:
+ - pipeline: Build-CI
+ source: Build
+ trigger:
+ stages:
+ - build_gcc
+ containers:
+ - container: fmt # for formatting.yaml
+ image: pointcloudlibrary/fmt
+ - container: doc # for documentation.yaml
+ image: pointcloudlibrary/doc
+ - container: env1804 # for tutorials.yaml
+ image: pointcloudlibrary/env:18.04
+
+stages:
+ - stage: formatting
+ displayName: Formatting
+ # If the docs pipeline was triggered by the build_gcc stage,
+ # the formatting stage has already run, so it
+ # won't run a second time here.
+ condition: ne(variables['Build.Reason'], 'ResourceTrigger')
+ jobs:
+ - template: formatting.yaml
+
+ - stage: documentation
+ displayName: Documentation
+ condition: in(dependencies.formatting.result, 'Succeeded', 'SucceededWithIssues', 'Skipped')
+ jobs:
+ - template: documentation.yaml
+
+ - stage: tutorials
+ displayName: Tutorials
+ condition: in(dependencies.documentation.result, 'Succeeded', 'SucceededWithIssues')
+ jobs:
+ - template: tutorials.yaml
- job: documentation
displayName: Generate Documentation
pool:
- vmImage: 'Ubuntu 16.04'
+ vmImage: 'Ubuntu 20.04'
container: doc
variables:
BUILD_DIR: '$(Agent.BuildDirectory)/build'
paths:
include:
- .dev/docker/env/Dockerfile
+ - .dev/docker/windows
- .ci/azure-pipelines/env.yml
pr:
paths:
include:
- .dev/docker/env/Dockerfile
+ - .dev/docker/windows
- .ci/azure-pipelines/env.yml
schedules:
dockerHubID: "pointcloudlibrary"
jobs:
-- job: BuildAndPush
+- job: BuildAndPushUbuntu
timeoutInMinutes: 360
displayName: "Env"
pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
- Ubuntu 16.04:
- CUDA_VERSION: 9.2
- UBUNTU_DISTRO: 16.04
- USE_CUDA: false
- VTK_VERSION: 6
- tag: 16.04
Ubuntu 18.04:
CUDA_VERSION: 10.2
UBUNTU_DISTRO: 18.04
USE_CUDA: true
VTK_VERSION: 6
- tag: 18.04
+ TAG: 18.04
Ubuntu 20.04:
- CUDA_VERSION: 11
+ CUDA_VERSION: 11.2.1
UBUNTU_DISTRO: 20.04
VTK_VERSION: 7
- # nvidia-cuda docker image has not been released for 20.04 yet
+ USE_CUDA: true
+ TAG: 20.04
+ Ubuntu 20.10:
+ CUDA_VERSION: 11.2.1
+ UBUNTU_DISTRO: 20.10
+ VTK_VERSION: 7
+ # nvidia-cuda docker image has not been released for 20.10 yet
+ USE_CUDA: ""
+ TAG: 20.10
+ Ubuntu 21.04:
+ CUDA_VERSION: 11.2.1
+ UBUNTU_DISTRO: 21.04
+ VTK_VERSION: 9
+ # nvidia-cuda docker image has not been released for 21.04 yet
USE_CUDA: ""
- tag: 20.04
+ TAG: 21.04
steps:
- task: Docker@2
displayName: "Build docker image"
--build-arg UBUNTU_DISTRO=$(UBUNTU_DISTRO)
--build-arg USE_CUDA=$(USE_CUDA)
--build-arg VTK_VERSION=$(VTK_VERSION)
- -t $(dockerHubID)/env:$(tag)
+ -t $(dockerHubID)/env:$(TAG)
dockerfile: '$(Build.SourcesDirectory)/.dev/docker/env/Dockerfile'
- tags: "$(tag)"
+ tags: "$(TAG)"
- script: |
set -x
- docker run --rm -v "$(Build.SourcesDirectory)":/pcl $(dockerHubID)/env:$(tag) bash -c ' \
+ docker run --rm -v "$(Build.SourcesDirectory)":/pcl $(dockerHubID)/env:$(TAG) bash -c ' \
mkdir /pcl/build && cd /pcl/build && \
cmake /pcl \
-DCMAKE_BUILD_TYPE="Release" \
command: push
containerRegistry: $(dockerHub)
repository: $(dockerHubID)/env
- tags: "$(tag)"
+ tags: "$(TAG)"
+ condition: and(eq(variables['Build.Repository.Name'], 'PointCloudLibrary/pcl'),
+ eq(variables['Build.SourceBranch'], 'refs/heads/master'))
+- job: BuildAndPushWindows
+ timeoutInMinutes: 360
+ displayName: "Env"
+ pool:
+ vmImage: 'windows-2019'
+ strategy:
+ matrix:
+ Winx86:
+ PLATFORM: x86
+ TAG: winx86
+ GENERATOR: "'Visual Studio 16 2019' -A Win32"
+ VCPKGCOMMIT: 2bc10eae2fb0b8c7c098325c4e9d82aa5d0329d9
+ Winx64:
+ PLATFORM: x64
+ TAG: winx64
+ GENERATOR: "'Visual Studio 16 2019' -A x64"
+ VCPKGCOMMIT: master
+ steps:
+ - task: Docker@2
+ displayName: "Build docker image"
+ inputs:
+ command: build
+ arguments: |
+ --no-cache
+ --build-arg PLATFORM=$(PLATFORM)
+ --build-arg VCPKGCOMMIT=$(VCPKGCOMMIT)
+ -t $(dockerHubID)/env:$(TAG)
+ dockerfile: '$(Build.SourcesDirectory)/.dev/docker/windows/Dockerfile'
+ tags: "$(TAG)"
+
+ - script: >
+ docker run --rm -v "$(Build.SourcesDirectory)":c:\pcl $(dockerHubID)/env:$(TAG)
+ powershell -command "mkdir c:\pcl\build; cd c:\pcl\build;
+ cmake c:\pcl -G$(GENERATOR)
+ -DVCPKG_TARGET_TRIPLET=$(PLATFORM)-windows-rel
+ -DCMAKE_BUILD_TYPE='Release'
+ -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake
+ -DPCL_ONLY_CORE_POINT_TYPES=ON
+ -DBUILD_io:BOOL=OFF
+ -DBUILD_kdtree:BOOL=OFF;
+ cmake --build . "
+ displayName: 'Verify Docker image'
+ - task: Docker@2
+ displayName: "Push docker image"
+ inputs:
+ command: push
+ containerRegistry: $(dockerHub)
+ repository: $(dockerHubID)/env
+ tags: "$(TAG)"
condition: and(eq(variables['Build.Repository.Name'], 'PointCloudLibrary/pcl'),
eq(variables['Build.SourceBranch'], 'refs/heads/master'))
- job: formatting
displayName: Check code formatting
pool:
- vmImage: 'Ubuntu 16.04'
+ vmImage: 'Ubuntu 20.04'
container: fmt
steps:
- checkout: self
timeoutInMinutes: 360
pool:
vmImage: 'ubuntu-latest'
+ strategy:
+ matrix:
+ INT32:
+ PCL_INDEX_SIGNED: true
+ PCL_INDEX_SIZE: 32
+ UINT32:
+ PCL_INDEX_SIGNED: false
+ PCL_INDEX_SIZE: 32
+ INT64:
+ PCL_INDEX_SIGNED: true
+ PCL_INDEX_SIZE: 64
+ UINT64:
+ PCL_INDEX_SIGNED: false
+ PCL_INDEX_SIZE: 64
variables:
tag: "release"
steps:
arguments: |
--no-cache
-t $(dockerHubID)/release:$(tag)
+ --build-arg PCL_INDEX_SIGNED=$(PCL_INDEX_SIGNED)
+ --build-arg PCL_INDEX_SIZE=$(PCL_INDEX_SIZE)
dockerfile: '$(Build.SourcesDirectory)/.dev/docker/release/Dockerfile'
tags: "$(tag)"
- stage: ROS
- job: tutorials
displayName: Building Tutorials
pool:
- vmImage: 'Ubuntu 16.04'
- container: env1604
+ vmImage: 'Ubuntu 20.04'
+ container: env1804
timeoutInMinutes: 0
variables:
BUILD_DIR: '$(Agent.BuildDirectory)/build'
TUTORIAL_BUILD_DIR="$BUILD_DIR/$NAME"
mkdir -p "$TUTORIAL_BUILD_DIR" && cd "$TUTORIAL_BUILD_DIR" || exit
echo "Configuring tutorial: $NAME"
- if ! cmake "$TUTORIAL_SOURCE_DIR" -DPCL_DIR="$INSTALL_DIR" -DCMAKE_CXX_FLAGS="-Werror"; then
+ if ! cmake "$TUTORIAL_SOURCE_DIR" -DPCL_DIR="$INSTALL_DIR" -DCMAKE_CXX_FLAGS="-Wall -Wextra -Wpedantic -Werror"; then
STATUS="cmake error"
else
echo "Building tutorial: $NAME"
RUN apt-get update \
&& apt-get install -y \
+ xvfb \
cmake \
g++ \
clang \
libflann-dev \
libglew-dev \
libgtest-dev \
+ libbenchmark-dev \
libopenni-dev \
libopenni2-dev \
libproj-dev \
ARG VTK_VERSION=7
ENV DEBIAN_FRONTEND=noninteractive
+ARG PCL_INDEX_SIGNED=true
+ARG PCL_INDEX_SIZE=32
+
# Add sources so we can just install build-dependencies of PCL
RUN sed -i 's/^deb \(.*\)$/deb \1\ndeb-src \1/' /etc/apt/sources.list \
&& apt update \
-DBUILD_tools=ON -DBUILD_tracking=ON -DBUILD_visualization=ON \
-DBUILD_apps_cloud_composer=OFF -DBUILD_apps_modeler=ON \
-DBUILD_apps_point_cloud_editor=ON -DBUILD_apps_in_hand_scanner=ON \
+ -DPCL_INDEX_SIGNED=${PCL_INDEX_SIGNED} \
+ -DPCL_INDEX_SIZE=${PCL_INDEX_SIZE} \
&& make install -j2 \
&& cd \
&& rm -fr pcl
--- /dev/null
+# escape=`
+
+FROM mcr.microsoft.com/windows/servercore:ltsc2019
+
+# Use "--build-arg PLATFORM=x64" for 64 bit or PLATFORM=x86 for 32 bit.
+ARG PLATFORM
+
+# Used to set a specific vcpkg commit to check out
+ARG VCPKGCOMMIT
+
+# Download channel for a pinned (fixed) Visual Studio Build Tools install.
+ARG CHANNEL_BASE_URL=https://aka.ms/vs/16/release
+
+ADD $CHANNEL_BASE_URL/channel C:\TEMP\VisualStudio.chman
+
+SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
+
+# Download and install Build Tools for Visual Studio 2019 for native desktop
+RUN wget $Env:CHANNEL_BASE_URL/vs_buildtools.exe -OutFile 'C:\TEMP\vs_buildtools.exe'; `
+ Start-Process -FilePath C:\TEMP\vs_buildtools.exe -ArgumentList `
+ "--quiet", `
+ "--norestart", `
+ "--nocache", `
+ "--installPath", `
+ "C:\BuildTools", `
+ "--wait", `
+ "--channelUri", `
+ "C:\TEMP\VisualStudio.chman", `
+ "--installChannelUri", `
+ "C:\TEMP\VisualStudio.chman", `
+ "--add", `
+ "Microsoft.VisualStudio.Workload.VCTools", `
+ "--includeRecommended" `
+ -Wait -PassThru; `
+ del c:\temp\vs_buildtools.exe;
+
+RUN iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1')); `
+ choco install cmake git --installargs 'ADD_CMAKE_TO_PATH=System' -y --no-progress
+
+RUN git clone https://github.com/microsoft/vcpkg.git; cd vcpkg; git checkout $Env:VCPKGCOMMIT;
+
+# Explicitly set vcpkg to build only the Release versions of the libraries.
+COPY $PLATFORM'-windows-rel.cmake' 'c:\vcpkg\triplets\'$PLATFORM'-windows-rel.cmake'
+
+RUN cd .\vcpkg; `
+ .\bootstrap-vcpkg.bat; `
+ .\vcpkg install boost flann eigen3 qhull vtk[qt,opengl] gtest benchmark --triplet $Env:PLATFORM-windows-rel --clean-after-build;
--- /dev/null
+set(VCPKG_TARGET_ARCHITECTURE x64)
+set(VCPKG_CRT_LINKAGE dynamic)
+set(VCPKG_LIBRARY_LINKAGE dynamic)
+set(VCPKG_BUILD_TYPE release)
--- /dev/null
+set(VCPKG_TARGET_ARCHITECTURE x86)
+set(VCPKG_CRT_LINKAGE dynamic)
+set(VCPKG_LIBRARY_LINKAGE dynamic)
+set(VCPKG_BUILD_TYPE release)
format() {
# don't use a directory with whitespace
- local whitelist="apps/3d_rec_framework apps/modeler 2d ml octree simulation stereo tracking"
+ local whitelist="apps/3d_rec_framework apps/include apps/modeler apps/src benchmarks 2d geometry ml octree simulation stereo tracking registration gpu/containers"
local PCL_DIR="${2}"
local formatter="${1}"
)
if(${VTK_FOUND})
- set(VTK_USE_FILE "${VTK_USE_FILE}" CACHE INTERNAL "VTK_USE_FILE")
- include("${VTK_USE_FILE}")
set(VTK_IO_TARGET_LINK_LIBRARIES vtkCommon vtkWidgets vtkIO vtkImaging)
endif()
# ChangeList
+## = 1.12.0 (2021.07.07) =
+
+PCL 1.12.0 enables a custom index size and type, from `int16_t` to `uint64_t`, allowing
+users to work with clouds as small or as large as they wish. 1.12 also comes with improved
+support for VTK and CUDA, along with making existing functionality more user-friendly.
+
+This is all on top of the usual bug fixes and performance improvements across the board.
+
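+A rough sketch, assuming the `pcl::index_t` / `pcl::Indices` aliases from `<pcl/types.h>`: code
+written against these aliases automatically follows whatever index width and signedness
+(`PCL_INDEX_SIZE`, `PCL_INDEX_SIGNED`) the build selects.
+
+```cpp
+#include <pcl/common/point_tests.h> // pcl::isFinite
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+#include <pcl/types.h>              // pcl::index_t, pcl::Indices
+
+#include <cstddef>
+
+// Collect the indices of finite points; the index container and element type
+// follow the PCL_INDEX_SIZE / PCL_INDEX_SIGNED choices made at build time.
+pcl::Indices
+finiteIndices(const pcl::PointCloud<pcl::PointXYZ>& cloud)
+{
+  pcl::Indices out;
+  for (std::size_t i = 0; i < cloud.size(); ++i)
+    if (pcl::isFinite(cloud[i]))
+      out.push_back(static_cast<pcl::index_t>(i));
+  return out;
+}
+```
+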
+### Notable changes
+
+**New features** *added to PCL*
+
+* **[sample_consensus]** Add SIMD implementations to some countWithinDistance functions [[#3519](https://github.com/PointCloudLibrary/pcl/pull/3519)]
+* **[io]** Enable Real Sense 2 grabber for all platforms [[#4471](https://github.com/PointCloudLibrary/pcl/pull/4471)]
+* **[visualization]** add ellipsoid shape to pcl_visualizer [[#4531](https://github.com/PointCloudLibrary/pcl/pull/4531)]
+* **[common]** Add `constexpr` to static member functions for point types, add macro for `if constexpr` [[#4735](https://github.com/PointCloudLibrary/pcl/pull/4735)]
+* **[ci]** Use windows docker image in CI. [[#4426](https://github.com/PointCloudLibrary/pcl/pull/4426)]
+* **[common]** Add pcl log stream macros [[#4595](https://github.com/PointCloudLibrary/pcl/pull/4595)]
+
+**Deprecation** *of public APIs, scheduled to be removed after two minor releases*
+
+* **[common]** Modify index type for vertices [[#4256](https://github.com/PointCloudLibrary/pcl/pull/4256)]
+* **[gpu]** Add square distances to GPU knnSearch API [[#4322](https://github.com/PointCloudLibrary/pcl/pull/4322)]
+* **[gpu]** Add square distances to ApproxNearestSearch [[#4340](https://github.com/PointCloudLibrary/pcl/pull/4340)]
+* Deprecate unused ease-of-internal-use headers [[#4367](https://github.com/PointCloudLibrary/pcl/pull/4367)]
+* **[registration]** Deprecate `TransformationEstimationDQ` in favor of `TransformationEstimationDualQuaternion` [[#4425](https://github.com/PointCloudLibrary/pcl/pull/4425)]
+* **[segmentation]** Deprecate unused `max_label` in `extractLabeledEuclideanClusters` [[#4105](https://github.com/PointCloudLibrary/pcl/pull/4105)]
+* **[surface]** MLS: correct typo in `principle` by using `principal` instead [[#4505](https://github.com/PointCloudLibrary/pcl/pull/4505)]
+* Deprecate unneeded meta-headers [[#4628](https://github.com/PointCloudLibrary/pcl/pull/4628)]
+* **[apps][tracking]** pyramidal klt: switch keypoints_status_ to int vector [[#4681](https://github.com/PointCloudLibrary/pcl/pull/4681)]
+* Properly remove remaining items deprecated for version 1.12, deprecate `uniform_sampling.h` [[#4688](https://github.com/PointCloudLibrary/pcl/pull/4688)]
+* **[recognition]** Add deprecation for incorrectly installed headers [[#4650](https://github.com/PointCloudLibrary/pcl/pull/4650)]
+
+**Removal** *of the public APIs deprecated in previous releases*
+
+* Remove deprecated items as scheduled in preparation of v1.12 (except `concatenatePointCloud`) [[#4341](https://github.com/PointCloudLibrary/pcl/pull/4341)]
+* **[apps]** Remove unused code in persistence_utils.h [[#4500](https://github.com/PointCloudLibrary/pcl/pull/4500)]
+* Properly remove remaining items deprecated for version 1.12, deprecate `uniform_sampling.h` [[#4688](https://github.com/PointCloudLibrary/pcl/pull/4688)]
+
+**Behavior changes** *in classes, apps, or tools*
+
+* **[registration]** Don't move, or copy ICP [[#4167](https://github.com/PointCloudLibrary/pcl/pull/4167)]
+* **[common]** Fix PointXYZRGBA ctor, set A as 255 by default (see the sketch below) [[#4799](https://github.com/PointCloudLibrary/pcl/pull/4799)]
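+
+A hedged sketch of what this behavior change implies for callers, assuming the usual
+`r`/`g`/`b`/`a` members of `pcl::PointXYZRGBA`:
+
+```cpp
+#include <pcl/point_types.h>
+
+#include <cassert>
+
+int
+main()
+{
+  pcl::PointXYZRGBA p; // default-constructed
+  assert(p.a == 255);  // alpha now defaults to fully opaque
+  return 0;
+}
+```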
+
+**API changes** *that did not go through the proper deprecation and removal cycle*
+
+* **[common]** modify index type for PCLImage [[#4257](https://github.com/PointCloudLibrary/pcl/pull/4257)]
+* **[registration]** Don't move, or copy ICP [[#4167](https://github.com/PointCloudLibrary/pcl/pull/4167)]
+* **[kdtree]** KdTree: handle 0 or negative k for nearestKSearch [[#4430](https://github.com/PointCloudLibrary/pcl/pull/4430)]
+* **[common]** Use `std::array` instead of C-array for ColorLUT [[#4489](https://github.com/PointCloudLibrary/pcl/pull/4489)]
+* **[tracking]** Use SFINAE instead of relying on macro `PCL_TRACKING_NORMAL_SUPPORTED` [[#4643](https://github.com/PointCloudLibrary/pcl/pull/4643)]
+* **[gpu]** Export and template extract clusters [[#4196](https://github.com/PointCloudLibrary/pcl/pull/4196)]
+* **[common]** Added `namespace pcl` to free functions: `aligned_{malloc/free}` [[#4742](https://github.com/PointCloudLibrary/pcl/pull/4742)]
+
+**ABI changes** *that are still API compatible*
+
+* **[registration]** Refactoring and Bugfix of NDT [[#4180](https://github.com/PointCloudLibrary/pcl/pull/4180)]
+* **[common]** modify index types for PCLPointCloud2 [[#4199](https://github.com/PointCloudLibrary/pcl/pull/4199)]
+* **[common]** Modify index type for vertices [[#4256](https://github.com/PointCloudLibrary/pcl/pull/4256)]
+* **[common]** Modify index type for PCLPointField [[#4228](https://github.com/PointCloudLibrary/pcl/pull/4228)]
+* **[surface]** Enabled multithreading in Poisson surface reconstruction [[#4332](https://github.com/PointCloudLibrary/pcl/pull/4332)]
+* **[io]** Allow file_io to read large point clouds depending on PCL config [[#4331](https://github.com/PointCloudLibrary/pcl/pull/4331)]
+* **[sample_consensus]** Allow user to apply arbitrary constraint on models in sample consensus [[#4260](https://github.com/PointCloudLibrary/pcl/pull/4260)]
+* **[tracking]** Use SFINAE instead of relying on macro `PCL_TRACKING_NORMAL_SUPPORTED` [[#4643](https://github.com/PointCloudLibrary/pcl/pull/4643)]
+* **[features]** Move the init of static variables to library load time [[#4640](https://github.com/PointCloudLibrary/pcl/pull/4640)]
+* **[octree]** Octree2BufBase: Fix bug that contents from previous buffer appear in current buffer [[#4642](https://github.com/PointCloudLibrary/pcl/pull/4642)]
+
+### Changes grouped by module
+
+#### CMake:
+
+* Update `pcl_find_boost` to allow compilation with Boost 1.74 [[#4330](https://github.com/PointCloudLibrary/pcl/pull/4330)]
+* Variable needs to be expanded when checking for `EXT_DEPS` [[#4353](https://github.com/PointCloudLibrary/pcl/pull/4353)]
+* Update pcl_find_cuda.cmake to contain all supported architectures [[#4400](https://github.com/PointCloudLibrary/pcl/pull/4400)]
+* Add support for VTK 9 [[#4262](https://github.com/PointCloudLibrary/pcl/pull/4262)]
+* Refactor cmake find script of libusb [[#4483](https://github.com/PointCloudLibrary/pcl/pull/4483)]
+* Add AVX for windows [[#4598](https://github.com/PointCloudLibrary/pcl/pull/4598)]
+* Add SSE definitions for SSE 4.1 and 4.2 [[#4596](https://github.com/PointCloudLibrary/pcl/pull/4596)]
+
+#### libpcl_common:
+
+* **[ABI break]** modify index types for PCLPointCloud2 [[#4199](https://github.com/PointCloudLibrary/pcl/pull/4199)]
+* **[API break]** modify index type for PCLImage [[#4257](https://github.com/PointCloudLibrary/pcl/pull/4257)]
+* **[ABI break][deprecation]** Modify index type for vertices [[#4256](https://github.com/PointCloudLibrary/pcl/pull/4256)]
+* **[ABI break]** Modify index type for PCLPointField [[#4228](https://github.com/PointCloudLibrary/pcl/pull/4228)]
+* Allow PCL_DEPRECATED to detect and help remove deprecations before release [[#4336](https://github.com/PointCloudLibrary/pcl/pull/4336)]
+* Allow conversion of PointCloud with more than 32-bit size rows/columns [[#4343](https://github.com/PointCloudLibrary/pcl/pull/4343)]
+* Improve routing for `transformPointCloud` [[#4398](https://github.com/PointCloudLibrary/pcl/pull/4398)]
+* Correct typo in `transformPlane` [[#4396](https://github.com/PointCloudLibrary/pcl/pull/4396)]
+* **[API break]** Use `std::array` instead of C-array for ColorLUT [[#4489](https://github.com/PointCloudLibrary/pcl/pull/4489)]
+* Set header in two toPCLPointCloud2 functions [[#4538](https://github.com/PointCloudLibrary/pcl/pull/4538)]
+* Add more operators to `PointCloud` to prevent perf regression in refactoring [[#4397](https://github.com/PointCloudLibrary/pcl/pull/4397)]
+* Make sure that organized point clouds are still organized after transformPointCloud [[#4488](https://github.com/PointCloudLibrary/pcl/pull/4488)]
+* **[API break]** Added `namespace pcl` to free functions: `aligned_{malloc/free}` [[#4742](https://github.com/PointCloudLibrary/pcl/pull/4742)]
+* **[new feature]** Add `constexpr` to static member functions for point types, add macro for `if constexpr` [[#4735](https://github.com/PointCloudLibrary/pcl/pull/4735)]
+* Fix `PolygonMesh::concatenate` and its unit test [[#4745](https://github.com/PointCloudLibrary/pcl/pull/4745)]
+* **[behavior change]** Fix PointXYZRGBA ctor, set A as 255 by default [[#4799](https://github.com/PointCloudLibrary/pcl/pull/4799)]
+* Remove pseudo-template-instantiations in eigen.h to reduce compilation time [[#4788](https://github.com/PointCloudLibrary/pcl/pull/4788)]
+* **[new feature]** Add pcl log stream macros [[#4595](https://github.com/PointCloudLibrary/pcl/pull/4595)]
+
+#### libpcl_features:
+
+* **[ABI break]** Move the init of static variables to library load time [[#4640](https://github.com/PointCloudLibrary/pcl/pull/4640)]
+* Use correct cloud for checking finite-ness in fpfh [[#4720](https://github.com/PointCloudLibrary/pcl/pull/4720)]
+
+#### libpcl_filters:
+
+* Improve performance of median filter by using `nth_element` [[#4360](https://github.com/PointCloudLibrary/pcl/pull/4360)]
+* Fix the covariance calculation as suggested by @zxd123 [[#4466](https://github.com/PointCloudLibrary/pcl/pull/4466)]
+* Filters: fix wrong initialization of covariance in VoxelGridCovariance [[#4556](https://github.com/PointCloudLibrary/pcl/pull/4556)]
+* Fix application of setMinimumPointsNumberPerVoxel for PCLPointCloud2 implementation of VoxelGrid [[#4389](https://github.com/PointCloudLibrary/pcl/pull/4389)]
+* Adding tests for CropHull and using hull_cloud instead of input in getHullCloudRange [[#3976](https://github.com/PointCloudLibrary/pcl/pull/3976)]
+
+#### libpcl_gpu:
+
+* **[deprecation]** Add square distances to GPU knnSearch API [[#4322](https://github.com/PointCloudLibrary/pcl/pull/4322)]
+* **[deprecation]** Add square distances to ApproxNearestSearch [[#4340](https://github.com/PointCloudLibrary/pcl/pull/4340)]
+* **[API break]** Export and template extract clusters [[#4196](https://github.com/PointCloudLibrary/pcl/pull/4196)]
+* Update support for CUDA arch in CMake and `convertSMVer2Cores` [[#4748](https://github.com/PointCloudLibrary/pcl/pull/4748)]
+* Add ability to download contiguous chunk of memory to host using `Device{Array,Memory}` [[#4741](https://github.com/PointCloudLibrary/pcl/pull/4741)]
+* Speeding up GPU clustering using smarter download strategy and memory allocations [[#4677](https://github.com/PointCloudLibrary/pcl/pull/4677)]
+
+#### libpcl_io:
+
+* **[ABI break]** Allow file_io to read large point clouds depending on PCL config [[#4331](https://github.com/PointCloudLibrary/pcl/pull/4331)]
+* Improve PCD read performance (more than 50%) by reusing `istringstream` [[#4339](https://github.com/PointCloudLibrary/pcl/pull/4339)]
+* **[new feature]** Enable Real Sense 2 grabber for all platforms [[#4471](https://github.com/PointCloudLibrary/pcl/pull/4471)]
+* Throw error if the device bluffs about its capability [[#4141](https://github.com/PointCloudLibrary/pcl/pull/4141)]
+* Fix crash in Dinast Grabber due to bad initialization of device handle [[#4484](https://github.com/PointCloudLibrary/pcl/pull/4484)]
+* PLY face definition accepts uint fields as well [[#4492](https://github.com/PointCloudLibrary/pcl/pull/4492)]
+* Prevent segfault in vtk2mesh [[#4581](https://github.com/PointCloudLibrary/pcl/pull/4581)]
+* Prevent exception in PCDReader for misformed PCD files [[#4566](https://github.com/PointCloudLibrary/pcl/pull/4566)]
+* Enable arbitrary size Indices for Octree module [[#4350](https://github.com/PointCloudLibrary/pcl/pull/4350)]
+* Fix addition of Carriage Return to PCD files. [[#4727](https://github.com/PointCloudLibrary/pcl/pull/4727)]
+* Support Ensenso SDK 3.0 for ensenso_grabber [[#4751](https://github.com/PointCloudLibrary/pcl/pull/4751)]
+* Specify no face elements in PLY files (from point cloud) to make them interoperable with VTK [[#4774](https://github.com/PointCloudLibrary/pcl/pull/4774)]
+
+#### libpcl_kdtree:
+
+* **[API break]** KdTree: handle 0 or negative k for nearestKSearch (see the sketch below) [[#4430](https://github.com/PointCloudLibrary/pcl/pull/4430)]
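+
+A minimal sketch of a call site, assuming the `nearestKSearch(point, k, indices, sqr_distances)`
+overload of `pcl::KdTreeFLANN`; the point is simply that callers should request a positive `k`:
+
+```cpp
+#include <pcl/kdtree/kdtree_flann.h>
+#include <pcl/point_types.h>
+#include <pcl/types.h> // pcl::Indices
+
+#include <vector>
+
+void
+queryNeighbours(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
+                const pcl::PointXYZ& query, int k)
+{
+  pcl::KdTreeFLANN<pcl::PointXYZ> tree;
+  tree.setInputCloud(cloud);
+
+  if (k <= 0)
+    return; // per the PR above, 0 or negative k is now handled, but there is nothing to ask for
+
+  pcl::Indices nn_indices(k);
+  std::vector<float> nn_sqr_dists(k);
+  tree.nearestKSearch(query, k, nn_indices, nn_sqr_dists);
+}
+```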
+
+#### libpcl_ml:
+
+* Fix un-initialized centroids bug (k-means) [[#4570](https://github.com/PointCloudLibrary/pcl/pull/4570)]
+
+#### libpcl_octree:
+
+* Enable arbitrary size Indices for Octree module [[#4350](https://github.com/PointCloudLibrary/pcl/pull/4350)]
+* Fix problems in octree search functions when using dynamic depth [[#4657](https://github.com/PointCloudLibrary/pcl/pull/4657)]
+* **[ABI break]** Octree2BufBase: Fix bug that contents from previous buffer appear in current buffer [[#4642](https://github.com/PointCloudLibrary/pcl/pull/4642)]
+
+#### libpcl_outofcore:
+
+* Fix compile issue due to missing include under MSVC 2019 [[#4755](https://github.com/PointCloudLibrary/pcl/pull/4755)]
+
+#### libpcl_recognition:
+
+* **[deprecation]** Add deprecation for incorrectly installed headers [[#4650](https://github.com/PointCloudLibrary/pcl/pull/4650)]
+
+#### libpcl_registration:
+
+* **[ABI break]** Refactoring and Bugfix of NDT [[#4180](https://github.com/PointCloudLibrary/pcl/pull/4180)]
+* **[API break][behavior change]** Don't move, or copy ICP [[#4167](https://github.com/PointCloudLibrary/pcl/pull/4167)]
+* **[deprecation]** Deprecate `TransformationEstimationDQ` in favor of `TransformationEstimationDualQuaternion` [[#4425](https://github.com/PointCloudLibrary/pcl/pull/4425)]
+* Fix force no recompute [[#4535](https://github.com/PointCloudLibrary/pcl/pull/4535)]
+* Skip non-finite points for Pyramid Feature Matching [[#4711](https://github.com/PointCloudLibrary/pcl/pull/4711)]
+
+#### libpcl_sample_consensus:
+
+* **[ABI break]** Allow user to apply arbitrary constraint on models in sample consensus [[#4260](https://github.com/PointCloudLibrary/pcl/pull/4260)]
+* Improve logging errors during sample consensus model registration [[#4381](https://github.com/PointCloudLibrary/pcl/pull/4381)]
+* **[new feature]** Add SIMD implementations to some countWithinDistance functions [[#3519](https://github.com/PointCloudLibrary/pcl/pull/3519)]
+* Faster sample consensus functions [[#4424](https://github.com/PointCloudLibrary/pcl/pull/4424)]
+* Fix and improve MLESAC [[#4575](https://github.com/PointCloudLibrary/pcl/pull/4575)]
+* Improve logging in module `sample_consensus` [[#4261](https://github.com/PointCloudLibrary/pcl/pull/4261)]
+
+#### libpcl_search:
+
+* Faster organized search [[#4496](https://github.com/PointCloudLibrary/pcl/pull/4496)]
+* Add access to boxSearch [[#4282](https://github.com/PointCloudLibrary/pcl/pull/4282)]
+
+#### libpcl_segmentation:
+
+* **[deprecation]** Deprecate unused `max_label` in `extractLabeledEuclideanClusters` [[#4105](https://github.com/PointCloudLibrary/pcl/pull/4105)]
+* Fix the dotproduct calculation in `extractEuclideanClusters` for smooth surfaces [[#4162](https://github.com/PointCloudLibrary/pcl/pull/4162)]
+* Make euclidean clustering with normals faster [[#4551](https://github.com/PointCloudLibrary/pcl/pull/4551)]
+
+#### libpcl_surface:
+
+* **[ABI break]** Enabled multithreading in Poisson surface reconstruction [[#4332](https://github.com/PointCloudLibrary/pcl/pull/4332)]
+* Add stdlib header for malloc in poisson (bugfix for gcc-5) [[#4376](https://github.com/PointCloudLibrary/pcl/pull/4376)]
+* Always update counter and prevent overflow access in poisson4 octree [[#4316](https://github.com/PointCloudLibrary/pcl/pull/4316)]
+* Add missing include to nurbs_solve_umfpack.cpp [[#4571](https://github.com/PointCloudLibrary/pcl/pull/4571)]
+* **[deprecation]** MLS: correct typo in `principle` by using `principal` instead [[#4505](https://github.com/PointCloudLibrary/pcl/pull/4505)]
+
+#### libpcl_visualization:
+
+* Add support for VTK 9 [[#4262](https://github.com/PointCloudLibrary/pcl/pull/4262)]
+* **[new feature]** add ellipsoid shape to pcl_visualizer [[#4531](https://github.com/PointCloudLibrary/pcl/pull/4531)]
+
+#### PCL Apps:
+
+* **[removal]** Remove unused code in persistence_utils.h [[#4500](https://github.com/PointCloudLibrary/pcl/pull/4500)]
+* **[deprecation]** pyramidal klt: switch keypoints_status_ to int vector [[#4681](https://github.com/PointCloudLibrary/pcl/pull/4681)]
+
+#### PCL Docs:
+
+* Update documentation to be coherent with the style guide [[#4771](https://github.com/PointCloudLibrary/pcl/pull/4771)]
+
+#### PCL Tutorials:
+
+* Replace PassThrough with removeNaNFromPointCloud in 3 tutorials [[#4760](https://github.com/PointCloudLibrary/pcl/pull/4760)]
+
+#### PCL Tools:
+
+* Fix virtual scanner [[#4730](https://github.com/PointCloudLibrary/pcl/pull/4730)]
+
+#### CI:
+
+* Make windows build on c:\ drive to fix out-of-disk-space errors [[#4382](https://github.com/PointCloudLibrary/pcl/pull/4382)]
+* **[new feature]** Use windows docker image in CI. [[#4426](https://github.com/PointCloudLibrary/pcl/pull/4426)]
+
## = 1.11.1 (13.08.2020) =
Apart from the usual serving of bug-fixes and speed improvements, PCL 1.11.1 brings in
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "build type default to RelWithDebInfo, set to Release to improve performance" FORCE)
endif()
-project(PCL VERSION 1.11.1)
+project(PCL VERSION 1.12.0)
string(TOLOWER ${PROJECT_NAME} PROJECT_NAME_LOWER)
### ---[ Find universal dependencies
PCL_CHECK_FOR_SSE()
endif()
+# check for AVX flags for windows
+if(PCL_ENABLE_AVX AND "${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
+ include("${PCL_SOURCE_DIR}/cmake/pcl_find_avx.cmake")
+ PCL_CHECK_FOR_AVX()
+endif()
+
# ---[ Unix/Darwin/Windows specific flags
if(CMAKE_COMPILER_IS_GNUCXX)
if("${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wabi=11")
+ string(APPEND CMAKE_CXX_FLAGS " -Wabi=11")
else()
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wabi")
- endif()
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wno-unknown-pragmas -fno-strict-aliasing -Wno-format-extra-args -Wno-sign-compare -Wno-invalid-offsetof -Wno-conversion ${SSE_FLAGS_STR}")
- if(PCL_WARNINGS_ARE_ERRORS)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
+ string(APPEND CMAKE_CXX_FLAGS " -Wabi")
endif()
+ string(APPEND CMAKE_CXX_FLAGS " -Wall -Wextra -Wno-unknown-pragmas -fno-strict-aliasing -Wno-format-extra-args -Wno-sign-compare -Wno-invalid-offsetof -Wno-conversion ${SSE_FLAGS}")
+ endif()
+
+ if(PCL_WARNINGS_ARE_ERRORS)
+ string(APPEND CMAKE_CXX_FLAGS " -Werror -fno-strict-aliasing")
endif()
if("${CMAKE_SHARED_LINKER_FLAGS}" STREQUAL "" AND NOT CMAKE_SYSTEM_NAME STREQUAL "Darwin")
if(WIN32)
if(PCL_SHARED_LIBS)
- set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--export-all-symbols -Wl,--enable-auto-import")
+ string(APPEND CMAKE_SHARED_LINKER_FLAGS " -Wl,--export-all-symbols -Wl,--enable-auto-import")
if(MINGW)
add_definitions("-DBOOST_THREAD_USE_LIB")
- set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--allow-multiple-definition")
+ string(APPEND CMAKE_SHARED_LINKER_FLAGS " -Wl,--allow-multiple-definition")
endif()
else()
add_definitions("-DBOOST_LIB_DIAGNOSTIC -DBOOST_THREAD_USE_LIB")
add_compile_options(/bigobj)
if("${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /fp:precise /wd4800 /wd4521 /wd4251 /wd4275 /wd4305 /wd4355 ${SSE_FLAGS_STR}")
+ string(APPEND CMAKE_CXX_FLAGS " /fp:precise /wd4800 /wd4521 /wd4251 /wd4275 /wd4305 /wd4355 ${SSE_FLAGS} ${AVX_FLAGS}")
# Add extra code generation/link optimizations
if(CMAKE_MSVC_CODE_LINK_OPTIMIZATION AND (NOT BUILD_CUDA) AND (NOT BUILD_GPU))
- set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
- set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF")
- set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")
+ string(APPEND CMAKE_CXX_FLAGS_RELEASE " /GL")
+ string(APPEND CMAKE_SHARED_LINKER_FLAGS_RELEASE " /LTCG /OPT:REF")
+ string(APPEND CMAKE_EXE_LINKER_FLAGS_RELEASE " /LTCG")
else()
message("Global optimizations /GL has been turned off, as it doesn't work with nvcc/thrust")
endif()
# /MANIFEST:NO") # please, don't disable manifest generation, otherwise crash at start for vs2008
if(PCL_WARNINGS_ARE_ERRORS)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX")
+ string(APPEND CMAKE_CXX_FLAGS " /WX")
endif()
include(ProcessorCount)
# Usage of COMPILE_LANGUAGE generator expression for MSVC in add_compile_options requires at least CMake 3.11, see https://gitlab.kitware.com/cmake/cmake/issues/17435
if(MSVC_MP EQUAL 0)
# MSVC_MP is 0 in case the information cannot be determined by ProcessorCount => fallback
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MP")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
+ string(APPEND CMAKE_C_FLAGS " /MP")
+ string(APPEND CMAKE_CXX_FLAGS " /MP")
elseif(MSVC_MP GREATER 1)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MP${MSVC_MP}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP${MSVC_MP}")
+ string(APPEND CMAKE_C_FLAGS " /MP${MSVC_MP}")
+ string(APPEND CMAKE_CXX_FLAGS " /MP${MSVC_MP}")
endif()
else()
if(MSVC_MP EQUAL 0)
endif()
if(CMAKE_GENERATOR STREQUAL "Ninja")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /FS")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FS")
+ string(APPEND CMAKE_C_FLAGS " /FS")
+ string(APPEND CMAKE_CXX_FLAGS " /FS")
endif()
endif()
if("${CMAKE_CXX_FLAGS}" STREQUAL "")
set(CMAKE_CXX_FLAGS "-ftemplate-depth=1024 -Qunused-arguments -Wno-invalid-offsetof ${SSE_FLAGS_STR}") # Unfortunately older Clang versions do not have this: -Wno-unnamed-type-template-args
if(APPLE AND WITH_CUDA AND CUDA_FOUND)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
+ string(APPEND CMAKE_CXX_FLAGS " -stdlib=libstdc++")
endif()
endif()
set(CLANG_LIBRARIES "stdc++")
find_package(OpenMP COMPONENTS C CXX)
endif()
if(OpenMP_FOUND)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
+ string(APPEND CMAKE_C_FLAGS " ${OpenMP_C_FLAGS}")
+ string(APPEND CMAKE_CXX_FLAGS " ${OpenMP_CXX_FLAGS}")
if(${CMAKE_VERSION} VERSION_LESS "3.7")
message(STATUS "Found OpenMP")
else()
set(OPENMP_DLL VCOMP140)
endif()
if(OPENMP_DLL)
- set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} /DELAYLOAD:${OPENMP_DLL}D.dll")
- set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /DELAYLOAD:${OPENMP_DLL}.dll")
+ string(APPEND CMAKE_SHARED_LINKER_FLAGS_DEBUG " /DELAYLOAD:${OPENMP_DLL}D.dll")
+ string(APPEND CMAKE_SHARED_LINKER_FLAGS_RELEASE " /DELAYLOAD:${OPENMP_DLL}.dll")
else()
message(WARNING "Delay loading flag for OpenMP DLL is invalid.")
endif()
include_directories(SYSTEM ${EIGEN_INCLUDE_DIRS})
# FLANN (required)
-if(NOT PCL_SHARED_LIBS OR ((WIN32 AND NOT MINGW) AND NOT PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32))
- set(FLANN_USE_STATIC ON)
+find_package(FLANN 1.9.1 REQUIRED)
+if(NOT (${FLANN_LIBRARY_TYPE} MATCHES ${PCL_FLANN_REQUIRED_TYPE}) AND NOT (${PCL_FLANN_REQUIRED_TYPE} MATCHES "DONTCARE"))
+ message(FATAL_ERROR "Flann was selected with ${PCL_FLANN_REQUIRED_TYPE} but found as ${FLANN_LIBRARY_TYPE}")
endif()
-find_package(FLANN 1.7.0 REQUIRED)
-# libusb-1.0
+# libusb
option(WITH_LIBUSB "Build USB RGBD-Camera drivers" TRUE)
if(WITH_LIBUSB)
- find_package(libusb-1.0)
- if(LIBUSB_1_FOUND)
- include_directories(SYSTEM "${LIBUSB_1_INCLUDE_DIR}")
- endif()
+ find_package(libusb)
endif()
# Dependencies for different grabbers
include("${PCL_SOURCE_DIR}/cmake/pcl_find_cuda.cmake")
endif()
-option(WITH_QT "Build QT Front-End" TRUE)
-if(WITH_QT)
- find_package(Qt5 COMPONENTS Concurrent OpenGL Widgets QUIET)
-endif()
-# Find VTK
+# Reset VTK_FOUND to off
+set(VTK_FOUND OFF)
+# Find VTK - VTK has to be found before Qt, otherwise it can overwrite Qt variables
option(WITH_VTK "Build VTK-Visualizations" TRUE)
-if(WITH_VTK AND NOT ANDROID)
- set(PCL_VTK_COMPONENTS
- vtkChartsCore
- vtkCommonCore
- vtkCommonDataModel
- vtkCommonExecutionModel
- vtkFiltersCore
- vtkFiltersExtraction
- vtkFiltersModeling
- vtkImagingCore
- vtkImagingSources
- vtkInteractionStyle
- vtkInteractionWidgets
- vtkIOCore
- vtkIOGeometry
- vtkIOImage
- vtkIOLegacy
- vtkIOPLY
- vtkRenderingAnnotation
- vtkRenderingLOD
- vtkViewsContext2D
- )
- find_package(VTK COMPONENTS ${PCL_VTK_COMPONENTS})
- if(VTK_FOUND AND ("${VTK_VERSION}" VERSION_LESS 6.2))
- message(WARNING "The minimum required version of VTK is 6.2, but found ${VTK_VERSION}")
- set(VTK_FOUND FALSE)
+if(WITH_VTK)
+ if(ANDROID)
+ message(WARNING "VTK is not supported on Android.")
+ else()
+ include("${PCL_SOURCE_DIR}/cmake/pcl_find_vtk.cmake")
endif()
+endif()
- if(VTK_FOUND)
- if(NOT DEFINED VTK_RENDERING_BACKEND)
- # On old VTK versions this variable does not exist. In this case it is
- # safe to assume OpenGL backend
- set(VTK_RENDERING_BACKEND "OpenGL")
- endif()
- list(APPEND PCL_VTK_COMPONENTS vtkRenderingContext${VTK_RENDERING_BACKEND})
-
- if(WITH_QT)
- if(";${VTK_MODULES_ENABLED};" MATCHES ";vtkGUISupportQt;" AND ";${VTK_MODULES_ENABLED};" MATCHES ";vtkRenderingQt;")
- set(QVTK_FOUND ON)
- list(APPEND PCL_VTK_COMPONENTS vtkRenderingQt vtkGUISupportQt)
- else()
- unset(QVTK_FOUND)
- endif()
+# VTK can depend on Qt and search for the Qt version it requires, so search for Qt afterwards so that we can overwrite Qt5_FOUND if the version we require is not found.
+option(WITH_QT "Build QT Front-End" TRUE)
+if(WITH_QT)
+ find_package(Qt5 5.9.5 COMPONENTS Concurrent OpenGL Widgets)
+ set(QT_DISABLE_PRECATED_BEFORE_VAL "0x050900")
+
+ if(Qt5_FOUND)
+ message(STATUS "Qt5 version: ${Qt5_VERSION}")
+ set(QT5_FOUND ${Qt5_FOUND})
+ # Set CMake auto features to skip .hh files
+ if(POLICY CMP0100)
+ cmake_policy(SET CMP0100 OLD)
endif()
+
+ get_property(core_def TARGET Qt5::Core PROPERTY INTERFACE_COMPILE_DEFINITIONS)
+ list(APPEND core_def "QT_DISABLE_DEPRECATED_BEFORE=${QT_DISABLE_PRECATED_BEFORE_VAL}")
+ set_property(TARGET Qt5::Core PROPERTY INTERFACE_COMPILE_DEFINITIONS ${core_def})
- find_package(VTK COMPONENTS ${PCL_VTK_COMPONENTS})
-
- message(STATUS "VTK_MAJOR_VERSION ${VTK_MAJOR_VERSION}, rendering backend: ${VTK_RENDERING_BACKEND}")
- if(PCL_SHARED_LIBS OR (NOT (PCL_SHARED_LIBS) AND NOT (VTK_BUILD_SHARED_LIBS)))
- if(VTK_USE_FILE)
- include(${VTK_USE_FILE})
- endif()
- message(STATUS "VTK found (include: ${VTK_INCLUDE_DIRS}, libs: ${VTK_LIBRARIES}")
- if(APPLE)
- option(VTK_USE_COCOA "Use Cocoa for VTK render windows" ON)
- mark_as_advanced(VTK_USE_COCOA)
- endif()
- if(${VTK_RENDERING_BACKEND} STREQUAL "OpenGL")
- set(VTK_RENDERING_BACKEND_OPENGL_VERSION "1")
- message(DEPRECATION "The rendering backend OpenGL is deprecated and not available anymore since VTK 8.2."
- "Please switch to the OpenGL2 backend instead, which is available since VTK 6.2."
- "Support of the deprecated backend will be dropped with PCL 1.13.")
- elseif(${VTK_RENDERING_BACKEND} STREQUAL "OpenGL2")
- set(VTK_RENDERING_BACKEND_OPENGL_VERSION "2")
- endif()
- else()
- set(VTK_FOUND OFF)
- message("Warning: You are to build PCL in STATIC but VTK is SHARED!")
- message("Warning: VTK disabled!")
- endif()
+ else()
+ message(STATUS "Qt5 is not found.")
endif()
else()
- set(VTK_FOUND OFF)
+ set(Qt5_FOUND FALSE)
endif()
-
#Find PCAP
option(WITH_PCAP "pcap file capabilities in Velodyne HDL driver" TRUE)
if(WITH_PCAP)
* [Discord Server](https://discord.gg/JFFMAXS) for live chat with
other members of the PCL community and casual discussions
-<!--
+<!--
* Mailing list: The [PCL Google Group](https://groups.google.com/forum/#!forum/point-cloud-library)
-->
**Please ask first** before embarking on any significant pull request (e.g.
implementing features, refactoring code), otherwise you risk spending a lot of
time working on something that the project's developers might not want to merge
-into the project. Please read the [tutorial on writing a new PCL class](http://pointclouds.org/documentation/tutorials/writing_new_classes.php#writing-new-classes) if you want to contribute a
+into the project. Please read the [tutorial on writing a new PCL class](https://pcl.readthedocs.io/projects/tutorials/en/latest/writing_new_classes.html) if you want to contribute a
brand new feature.
If you are new to Git, GitHub, or contributing to an open-source project, you
Please use the following checklist to make sure that your contribution is well
prepared for merging into PCL:
-1. Source code adheres to the coding conventions described in [PCL Style Guide](http://pointclouds.org/documentation/advanced/pcl_style_guide.php).
+1. Source code adheres to the coding conventions described in [PCL Style Guide](http://pcl.readthedocs.io/projects/advanced/en/latest/pcl_style_guide.html).
But if you modify existing code, do not change/fix style in the lines that
are not related to your contribution.
```cpp
/*
- * Software License Agreement (BSD License)
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2014-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-, Open Perception Inc.
*
+ * All rights reserved
*/
```
endif()
set(Boost_ADDITIONAL_VERSIONS
"@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@.@Boost_SUBMINOR_VERSION@" "@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@"
- "1.73.0" "1.73" "1.72.0" "1.72" "1.71.0" "1.71" "1.70.0" "1.70"
- "1.69.0" "1.69" "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65"
- "1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60"
- "1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55")
+ "1.76.0" "1.76" "1.75.0" "1.75"
+ "1.74.0" "1.74" "1.73.0" "1.73" "1.72.0" "1.72" "1.71.0" "1.71" "1.70.0" "1.70"
+ "1.69.0" "1.69" "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65")
# Disable the config mode of find_package(Boost)
set(Boost_NO_BOOST_CMAKE ON)
- find_package(Boost 1.55.0 ${QUIET_} COMPONENTS @PCLCONFIG_AVAILABLE_BOOST_MODULES@)
+ find_package(Boost 1.65.0 ${QUIET_} COMPONENTS @PCLCONFIG_AVAILABLE_BOOST_MODULES@)
set(BOOST_FOUND ${Boost_FOUND})
set(BOOST_INCLUDE_DIRS "${Boost_INCLUDE_DIR}")
set(BOOST_LIBRARY_DIRS "${Boost_LIBRARY_DIRS}")
set(BOOST_LIBRARIES ${Boost_LIBRARIES})
if(WIN32 AND NOT MINGW)
- set(BOOST_DEFINITIONS ${BOOST_DEFINITIONS} -DBOOST_ALL_NO_LIB)
+ string(APPEND BOOST_DEFINITIONS -DBOOST_ALL_NO_LIB)
endif()
endmacro()
get_filename_component(EIGEN_ROOT "@EIGEN_INCLUDE_DIRS@" ABSOLUTE)
endif()
find_package(Eigen 3.1)
- set(EIGEN_DEFINITIONS ${EIGEN_DEFINITIONS})
endmacro()
#remove this as soon as qhull is shipped with FindQhull.cmake
endmacro()
macro(find_libusb)
- if(NOT WIN32)
- find_path(LIBUSB_1_INCLUDE_DIR
- NAMES libusb-1.0/libusb.h
- PATHS /usr/include /usr/local/include /opt/local/include /sw/include
- PATH_SUFFIXES libusb-1.0)
-
- find_library(LIBUSB_1_LIBRARY
- NAMES usb-1.0
- PATHS /usr/lib /usr/local/lib /opt/local/lib /sw/lib)
- find_package_handle_standard_args(libusb-1.0 LIBUSB_1_LIBRARY LIBUSB_1_INCLUDE_DIR)
- endif()
+ find_package(libusb)
endmacro()
macro(find_glew)
find_rssdk2()
elseif("${_lib}" STREQUAL "vtk")
find_VTK()
- elseif("${_lib}" STREQUAL "libusb-1.0")
+ elseif("${_lib}" STREQUAL "libusb")
find_libusb()
elseif("${_lib}" STREQUAL "glew")
find_glew()
if(WIN32 AND NOT MINGW)
# PCLConfig.cmake is installed to PCL_ROOT/cmake
get_filename_component(PCL_ROOT "${PCL_DIR}" PATH)
+ if(EXISTS "${PCL_ROOT}/3rdParty")
+ set(PCL_ALL_IN_ONE_INSTALLER ON)
+ endif()
else()
# PCLConfig.cmake is installed to PCL_ROOT/share/pcl-x.y
get_filename_component(PCL_ROOT "${CMAKE_CURRENT_LIST_DIR}/../.." ABSOLUTE)
# pcl_message("Found a PCL installation")
set(PCL_CONF_INCLUDE_DIR "${PCL_ROOT}/include/pcl-${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}")
set(PCL_LIBRARY_DIRS "${PCL_ROOT}/@LIB_INSTALL_DIR@")
- if(EXISTS "${PCL_ROOT}/3rdParty")
- set(PCL_ALL_IN_ONE_INSTALLER ON)
- endif()
elseif(EXISTS "${PCL_ROOT}/include/pcl/pcl_config.h")
# Found a non-standard (likely ANDROID) PCL installation
# pcl_message("Found a PCL installation")
set(PCL_CONF_INCLUDE_DIR "${PCL_ROOT}/include")
set(PCL_LIBRARY_DIRS "${PCL_ROOT}/lib")
- if(EXISTS "${PCL_ROOT}/3rdParty")
- set(PCL_ALL_IN_ONE_INSTALLER ON)
- endif()
elseif(EXISTS "${PCL_DIR}/include/pcl/pcl_config.h")
# Found PCLConfig.cmake in a build tree of PCL
# pcl_message("PCL found into a build tree.")
#pcl_message("No include directory found for pcl_${component}.")
endif()
+ set(FPHSA_NAME_MISMATCHED 1) # Suppress warnings, see https://cmake.org/cmake/help/v3.17/module/FindPackageHandleStandardArgs.html
# Skip find_library for header only modules
list(FIND pcl_header_only_components ${component} _is_header_only)
if(_is_header_only EQUAL -1)
find_package_handle_standard_args(PCL_${COMPONENT} DEFAULT_MSG
PCL_${COMPONENT}_INCLUDE_DIR)
endif()
+ unset(FPHSA_NAME_MISMATCHED)
if(PCL_${COMPONENT}_FOUND)
if(NOT "${PCL_${COMPONENT}_INCLUDE_DIRS}" STREQUAL "")
[![Release][release-image]][releases]
[![License][license-image]][license]
-[release-image]: https://img.shields.io/badge/release-1.11.1-green.svg?style=flat
+[release-image]: https://img.shields.io/badge/release-1.12.0-green.svg?style=flat
[releases]: https://github.com/PointCloudLibrary/pcl/releases
[license-image]: https://img.shields.io/badge/license-BSD-green.svg?style=flat
Continuous integration
----------------------
[ci-latest-build]: https://dev.azure.com/PointCloudLibrary/pcl/_build/latest?definitionId=9&branchName=master
-[ci-ubuntu-16.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2016.04%20GCC&label=Ubuntu%2016.04%20GCC
-[ci-ubuntu-18.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=Ubuntu&configuration=Ubuntu%2018.04%20Clang&label=Ubuntu%2018.04%20Clang
-[ci-ubuntu-20.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2020.04%20GCC&label=Ubuntu%2020.04%20GCC
-[ci-windows-x86]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20MSVC&jobName=Windows%20VS2017%20Build&configuration=Windows%20VS2017%20Build%20x86&label=Windows%20VS2017%20x86
-[ci-windows-x64]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20MSVC&jobName=Windows%20VS2017%20Build&configuration=Windows%20VS2017%20Build%20x64&label=Windows%20VS2017%20x64
+[ci-ubuntu-18.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2018.04%20GCC&label=Ubuntu%2018.04%20GCC
+[ci-ubuntu-20.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=Ubuntu&configuration=Ubuntu%2020.04%20Clang&label=Ubuntu%2020.04%20Clang
+[ci-ubuntu-20.10]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2020.10%20GCC&label=Ubuntu%2020.10%20GCC
+[ci-windows-x86]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20MSVC&jobName=Windows%20Build&configuration=Windows%20Build%20x86&label=Windows%20VS2019%20x86
+[ci-windows-x64]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20MSVC&jobName=Windows%20Build&configuration=Windows%20Build%20x64&label=Windows%20VS2019%20x64
[ci-macos-10.14]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=macOS&configuration=macOS%20Mojave%2010.14&label=macOS%20Mojave%2010.14
[ci-macos-10.15]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=macOS&configuration=macOS%20Catalina%2010.15&label=macOS%20Catalina%2010.15
+[ci-docs]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/Documentation?branchName=master
+[ci-latest-docs]: https://dev.azure.com/PointCloudLibrary/pcl/_build/latest?definitionId=14&branchName=master
Build Platform | Status
------------------------ | ------------------------------------------------------------------------------------------------- |
-Ubuntu | [![Status][ci-ubuntu-16.04]][ci-latest-build] <br> [![Status][ci-ubuntu-18.04]][ci-latest-build] <br> [![Status][ci-ubuntu-20.04]][ci-latest-build] |
+Ubuntu | [![Status][ci-ubuntu-18.04]][ci-latest-build] <br> [![Status][ci-ubuntu-20.04]][ci-latest-build] <br> [![Status][ci-ubuntu-20.10]][ci-latest-build] |
Windows | [![Status][ci-windows-x86]][ci-latest-build] <br> [![Status][ci-windows-x64]][ci-latest-build] |
macOS | [![Status][ci-macos-10.14]][ci-latest-build] <br> [![Status][ci-macos-10.15]][ci-latest-build] |
+Documentation | [![Status][ci-docs]][ci-latest-docs] |
Community
---------
remember to tag your questions with the tag `point-cloud-library`.
* [Discord Server](https://discord.gg/JFFMAXS) for live chat with
other members of the PCL community and casual discussions
+
+Citation
+--------
+We encourage other researchers to cite PCL if they use PCL or its components in their work or baselines. The BibTeX entry is:
+```
+@InProceedings{Rusu_ICRA2011_PCL,
+ author = {Radu Bogdan Rusu and Steve Cousins},
+ title = {{3D is here: Point Cloud Library (PCL)}},
+ booktitle = {{IEEE International Conference on Robotics and Automation (ICRA)}},
+ month = {May 9-13},
+ year = {2011},
+ address = {Shanghai, China},
+ publisher = {IEEE}
+}
+```
set(SUBSUBSYS_NAME 3d_rec_framework)
set(SUBSUBSYS_DESC "3D recognition framework")
set(SUBSUBSYS_DEPS common geometry io filters sample_consensus segmentation visualization kdtree features surface octree registration keypoints tracking search recognition ml)
-
-# Find VTK
-if(NOT VTK_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "VTK was not found.")
-else()
- set(DEFAULT TRUE)
- set(REASON)
- include("${VTK_USE_FILE}")
-endif()
-
-# OpenNI found?
-if(NOT WITH_OPENNI)
- set(DEFAULT AUTO_OFF)
- set(REASON "OpenNI was not found or was disabled by the user.")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
-endif()
+set(SUBSUBSYS_EXT_DEPS vtk openni)
# Default to not building for now
if(${DEFAULT} STREQUAL "TRUE")
endif()
PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSUBSYS_DESC}" ${DEFAULT} "${REASON}")
-PCL_SUBSUBSYS_DEPEND(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSUBSYS_DEPS} EXT_DEPS vtk openni)
+PCL_SUBSUBSYS_DEPEND(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSUBSYS_DEPS} EXT_DEPS ${SUBSUBSYS_EXT_DEPS})
if(NOT build)
return()
if(WITH_OPENNI)
target_link_libraries("${LIB_NAME}" ${OPENNI_LIBRARIES})
- if(NOT WIN32)
- find_package(libusb-1.0 REQUIRED)
- target_link_libraries("${LIB_NAME}" ${LIBUSB_1_LIBRARIES})
- endif()
endif()
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSUBSYS_NAME} DESC ${SUBSUBSYS_DESC})
#pragma once
#include <pcl/apps/3d_rec_framework/feature_wrapper/global/global_estimator.h>
-#include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h>
#include <pcl/features/cvfh.h>
#include <pcl/surface/mls.h>
for (std::size_t i = 0; i < cvfh_signatures.size(); i++) {
pcl::PointCloud<FeatureT> vfh_signature;
- vfh_signature.points.resize(1);
+ vfh_signature.resize(1);
vfh_signature.width = vfh_signature.height = 1;
for (int d = 0; d < 308; ++d)
vfh_signature[0].histogram[d] = cvfh_signatures[i].histogram[d];
for (const auto& point : cvfh_signatures.points) {
pcl::PointCloud<FeatureT> vfh_signature;
- vfh_signature.points.resize(1);
+ vfh_signature.resize(1);
vfh_signature.width = vfh_signature.height = 1;
for (int d = 0; d < 308; ++d)
vfh_signature[0].histogram[d] = point.histogram[d];
#pragma once
#include <pcl/apps/3d_rec_framework/feature_wrapper/local/local_estimator.h>
-#include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h>
#include <pcl/features/fpfh.h>
namespace pcl {
using KeypointExtractor<PointInT>::input_;
using KeypointExtractor<PointInT>::radius_;
float sampling_density_;
- std::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_;
+ std::shared_ptr<std::vector<pcl::Indices>> neighborhood_indices_;
std::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_;
void
tree.reset(new pcl::search::KdTree<PointInT>(false));
tree->setInputCloud(input);
- neighborhood_indices_.reset(new std::vector<std::vector<int>>);
+ neighborhood_indices_.reset(new std::vector<pcl::Indices>);
neighborhood_indices_->resize(keypoints_cloud->size());
neighborhood_dist_.reset(new std::vector<std::vector<float>>);
neighborhood_dist_->resize(keypoints_cloud->size());
- filtered_keypoints.points.resize(keypoints_cloud->size());
+ filtered_keypoints.resize(keypoints_cloud->size());
int good = 0;
for (std::size_t i = 0; i < keypoints_cloud->size(); i++) {
#include <pcl/apps/3d_rec_framework/feature_wrapper/local/local_estimator.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h>
#include <pcl/features/shot.h>
-#include <pcl/io/pcd_io.h>
namespace pcl {
namespace rec_3d_framework {
#include <pcl/apps/3d_rec_framework/feature_wrapper/local/local_estimator.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h>
#include <pcl/features/shot_omp.h>
-#include <pcl/io/pcd_io.h>
namespace pcl {
namespace rec_3d_framework {
KdTreeInPtr tree = pcl::make_shared<pcl::KdTreeFLANN<PointInT>>(false);
tree->setInputCloud(input);
- std::vector<int> nn_indices(9);
+ pcl::Indices nn_indices(9);
std::vector<float> nn_distances(9);
- std::vector<int> src_indices;
float sum_distances = 0.0;
std::vector<float> avg_distances(input->size());
}
if (j != static_cast<pcl::index_t>(out->size())) {
- PCL_ERROR("Contain nans...");
+ PCL_ERROR("Contain nans...\n");
}
out->points.resize(j);
#include <pcl/apps/3d_rec_framework/pc_source/source.h>
#include <pcl/apps/3d_rec_framework/utils/vtk_model_sampling.h>
#include <pcl/apps/render_views_tesselated_sphere.h>
-#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>
#include <vtkTransformPolyDataFilter.h>
void
loadOrGenerate(std::string& dir, std::string& model_path, ModelT& model)
{
- std::stringstream pathmodel;
- pathmodel << dir << "/" << model.class_ << "/" << model.id_;
- bf::path trained_dir = pathmodel.str();
+ const std::string pathmodel = dir + '/' + model.class_ + '/' + model.id_;
+ bf::path trained_dir = pathmodel;
model.views_.reset(new std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr>);
model.poses_.reset(
if (bf::exists(trained_dir)) {
// load views, poses and self-occlusions
std::vector<std::string> view_filenames;
- int number_of_views = 0;
for (const auto& dir_entry : bf::directory_iterator(trained_dir)) {
// check if its a directory, then get models in it
if (!(bf::is_directory(dir_entry))) {
if (extension == "pcd" && strs_[0] == "view") {
view_filenames.push_back((dir_entry.path().filename()).string());
-
- number_of_views++;
}
}
}
for (const auto& view_filename : view_filenames) {
- std::stringstream view_file;
- view_file << pathmodel.str() << "/" << view_filename;
+ const std::string view_file = pathmodel + '/' + view_filename;
typename pcl::PointCloud<PointInT>::Ptr cloud(new pcl::PointCloud<PointInT>());
- pcl::io::loadPCDFile(view_file.str(), *cloud);
+ pcl::io::loadPCDFile(view_file, *cloud);
model.views_->push_back(cloud);
boost::replace_all(file_replaced2, ".pcd", ".txt");
// read pose as well
- std::stringstream pose_file;
- pose_file << pathmodel.str() << "/" << file_replaced1;
+ const std::string pose_file = pathmodel + '/' + file_replaced1;
Eigen::Matrix4f pose;
- PersistenceUtils::readMatrixFromFile(pose_file.str(), pose);
+ PersistenceUtils::readMatrixFromFile(pose_file, pose);
model.poses_->push_back(pose);
// read entropy as well
- std::stringstream entropy_file;
- entropy_file << pathmodel.str() << "/" << file_replaced2;
+ const std::string entropy_file = pathmodel + '/' + file_replaced2;
float entropy = 0;
- PersistenceUtils::readFloatFromFile(entropy_file.str(), entropy);
+ PersistenceUtils::readFloatFromFile(entropy_file, entropy);
model.self_occlusions_->push_back(entropy);
}
}
model.self_occlusions_->push_back(entropies[i]);
}
- std::stringstream direc;
- direc << dir << "/" << model.class_ << "/" << model.id_;
+ const std::string direc = dir + '/' + model.class_ + '/' + model.id_;
this->createClassAndModelDirectories(dir, model.class_, model.id_);
for (std::size_t i = 0; i < model.views_->size(); i++) {
// save generated model for future use
- std::stringstream path_view;
- path_view << direc.str() << "/view_" << i << ".pcd";
- pcl::io::savePCDFileBinary(path_view.str(), *(model.views_->at(i)));
+ const std::string path_view = direc + "/view_" + std::to_string(i) + ".pcd";
+ pcl::io::savePCDFileBinary(path_view, *(model.views_->at(i)));
- std::stringstream path_pose;
- path_pose << direc.str() << "/pose_" << i << ".txt";
+ const std::string path_pose = direc + "/pose_" + std::to_string(i) + ".txt";
- pcl::rec_3d_framework::PersistenceUtils::writeMatrixToFile(path_pose.str(),
+ pcl::rec_3d_framework::PersistenceUtils::writeMatrixToFile(path_pose,
model.poses_->at(i));
- std::stringstream path_entropy;
- path_entropy << direc.str() << "/entropy_" << i << ".txt";
+ const std::string path_entropy =
+ direc + "/entropy_" + std::to_string(i) + ".txt";
pcl::rec_3d_framework::PersistenceUtils::writeFloatToFile(
- path_entropy.str(), model.self_occlusions_->at(i));
+ path_entropy, model.self_occlusions_->at(i));
}
loadOrGenerate(dir, model_path, model);
#pragma once
#include <pcl/apps/3d_rec_framework/pc_source/source.h>
-#include <pcl/io/io.h>
+#include <pcl/common/io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
void
loadOrGenerate(std::string& dir, std::string& model_path, ModelT& model)
{
- std::stringstream pathmodel;
- pathmodel << dir << "/" << model.class_ << "/" << model.id_;
- bf::path trained_dir = pathmodel.str();
+ const std::string pathmodel = dir + '/' + model.class_ + '/' + model.id_;
+ const bf::path trained_dir = pathmodel;
model.views_.reset(new std::vector<typename pcl::PointCloud<PointInT>::Ptr>);
model.poses_.reset(
if (bf::exists(trained_dir)) {
// load views and poses
std::vector<std::string> view_filenames;
- int number_of_views = 0;
for (const auto& dir_entry : bf::directory_iterator(trained_dir)) {
// check if its a directory, then get models in it
if (!(bf::is_directory(dir_entry))) {
if (extension == "pcd" && strs_[0] == "view") {
view_filenames.push_back((dir_entry.path().filename()).string());
- number_of_views++;
}
}
}
poses_to_assemble_;
for (std::size_t i = 0; i < view_filenames.size(); i++) {
- std::stringstream view_file;
- view_file << pathmodel.str() << "/" << view_filenames[i];
+ const std::string view_file = pathmodel + '/' + view_filenames[i];
typename pcl::PointCloud<PointInT>::Ptr cloud(new pcl::PointCloud<PointInT>());
- pcl::io::loadPCDFile(view_file.str(), *cloud);
+ pcl::io::loadPCDFile(view_file, *cloud);
model.views_->push_back(cloud);
boost::replace_all(file_replaced1, ".pcd", ".txt");
// read pose as well
- std::stringstream pose_file;
- pose_file << pathmodel.str() << "/" << file_replaced1;
+ const std::string pose_file = pathmodel + '/' + file_replaced1;
Eigen::Matrix4f pose;
- PersistenceUtils::readMatrixFromFile(pose_file.str(), pose);
+ PersistenceUtils::readMatrixFromFile(pose_file, pose);
if (pose_files_order_ != 0) {
Eigen::Matrix4f pose_trans = pose.transpose();
else {
// we just need to copy the views to the training directory
- std::stringstream direc;
- direc << dir << "/" << model.class_ << "/" << model.id_;
+ const std::string direc = dir + '/' + model.class_ + '/' + model.id_;
createClassAndModelDirectories(dir, model.class_, model.id_);
std::vector<std::string> view_filenames;
std::cout << view_filenames.size() << std::endl;
for (std::size_t i = 0; i < view_filenames.size(); i++) {
- std::stringstream view_file;
- view_file << model_path << "/" << view_filenames[i];
+ const std::string view_file = model_path + '/' + view_filenames[i];
typename pcl::PointCloud<PointInT>::Ptr cloud(new pcl::PointCloud<PointInT>());
- pcl::io::loadPCDFile(view_file.str(), *cloud);
+ pcl::io::loadPCDFile(view_file, *cloud);
- std::cout << view_file.str() << std::endl;
+ std::cout << view_file << std::endl;
- std::stringstream path_view;
- path_view << direc.str() << "/view_" << i << ".pcd";
- pcl::io::savePCDFileBinary(path_view.str(), *cloud);
+ const std::string path_view = direc + "/view_" + std::to_string(i) + ".pcd";
+ pcl::io::savePCDFileBinary(path_view, *cloud);
- std::string file_replaced1(view_file.str());
- boost::replace_all(file_replaced1, view_prefix_, "pose");
- boost::replace_all(file_replaced1, ".pcd", ".txt");
+ std::string pose_file(view_file);
+ boost::replace_all(pose_file, view_prefix_, "pose");
+ boost::replace_all(pose_file, ".pcd", ".txt");
Eigen::Matrix4f pose;
- PersistenceUtils::readMatrixFromFile(file_replaced1, pose);
+ PersistenceUtils::readMatrixFromFile(pose_file, pose);
std::cout << pose << std::endl;
std::cout << pose << std::endl;
}
- std::stringstream path_pose;
- path_pose << direc.str() << "/pose_" << i << ".txt";
- pcl::rec_3d_framework::PersistenceUtils::writeMatrixToFile(path_pose.str(),
- pose);
+ const std::string path_pose = direc + "/pose_" + std::to_string(i) + ".txt";
+ pcl::rec_3d_framework::PersistenceUtils::writeMatrixToFile(path_pose, pose);
}
loadOrGenerate(dir, model_path, model);
// check if its a directory, then get models in it
if (bf::is_directory(dir_entry)) {
std::string so_far =
- rel_path_so_far + (dir_entry.path().filename()).string() + "/";
+ rel_path_so_far + (dir_entry.path().filename()).string() + '/';
bf::path curr_path = dir_entry.path();
if (isleafDirectory(curr_path)) {
m.id_ = strs[0];
}
else {
- std::stringstream ss;
- for (int i = 0; i < (static_cast<int>(strs.size()) - 1); i++) {
- ss << strs[i];
- if (i != (static_cast<int>(strs.size()) - 1))
- ss << "/";
- }
-
- m.class_ = ss.str();
+ m.class_ = boost::algorithm::join(
+ std::vector<std::string>(strs.begin(), strs.end() - 1), "/");
m.id_ = strs[strs.size() - 1];
}
// load views, poses and self-occlusions for those that exist
// generate otherwise
- std::stringstream model_path;
- model_path << path_ << "/" << files[i];
- std::string path_model = model_path.str();
- loadOrGenerate(training_dir, path_model, m);
+ const std::string model_path = path_ + '/' + files[i];
+ loadOrGenerate(training_dir, model_path, m);
models_->push_back(m);
}
#pragma once
#include <pcl/apps/3d_rec_framework/utils/persistence_utils.h>
-#include <pcl/common/transforms.h>
#include <pcl/filters/voxel_grid.h>
-#include <pcl/io/pcd_io.h>
#include <boost/algorithm/string.hpp>
#include <boost/filesystem.hpp>
std::string& id,
std::string& classname)
{
-
- std::vector<std::string> strs;
- boost::split(strs, filename, boost::is_any_of("/\\"));
- std::string name = strs[strs.size() - 1];
-
- std::stringstream ss;
- for (int i = 0; i < (static_cast<int>(strs.size()) - 1); i++) {
- ss << strs[i];
- if (i != (static_cast<int>(strs.size()) - 1))
- ss << "/";
- }
-
- classname = ss.str();
- id = name.substr(0, name.length() - 4);
+ const bf::path path = filename;
+ classname = path.parent_path().string() + '/';
+ id = path.stem().string();
}
void
std::string& class_str,
std::string& id_str)
{
- std::vector<std::string> strs;
- boost::split(strs, class_str, boost::is_any_of("/\\"));
-
- std::stringstream ss;
- ss << training_dir << "/";
- for (const auto& str : strs) {
- ss << str << "/";
- bf::path trained_dir = ss.str();
- if (!bf::exists(trained_dir))
- bf::create_directory(trained_dir);
- }
-
- ss << id_str;
- bf::path trained_dir = ss.str();
- if (!bf::exists(trained_dir))
- bf::create_directory(trained_dir);
+ bf::create_directories(training_dir + '/' + class_str + '/' + id_str);
}
public:
// check if its a directory, then get models in it
if (bf::is_directory(dir_entry)) {
std::string so_far =
- rel_path_so_far + (dir_entry.path().filename()).string() + "/";
+ rel_path_so_far + (dir_entry.path().filename()).string() + '/';
bf::path curr_path = dir_entry.path();
getModelsInDirectory(curr_path, so_far, relative_paths, ext);
bool
modelAlreadyTrained(ModelT m, std::string& base_dir, std::string& descr_name)
{
- std::stringstream dir;
- dir << base_dir << "/" << m.class_ << "/" << m.id_ << "/" << descr_name;
- bf::path desc_dir = dir.str();
- std::cout << dir.str() << std::endl;
- return bf::exists(desc_dir);
+ return bf::exists(getModelDescriptorDir(m, base_dir, descr_name));
}
std::string
getModelDescriptorDir(ModelT m, std::string& base_dir, std::string& descr_name)
{
- std::stringstream dir;
- dir << base_dir << "/" << m.class_ << "/" << m.id_ << "/" << descr_name;
- return dir.str();
+ return base_dir + '/' + m.class_ + '/' + m.id_ + '/' + descr_name;
}
void
#include <pcl/apps/3d_rec_framework/feature_wrapper/global/global_estimator.h>
#include <pcl/apps/3d_rec_framework/pc_source/source.h>
-#include <pcl/common/common.h>
#include <flann/flann.hpp>
classify() = 0;
virtual void
- setIndices(std::vector<int>& indices) = 0;
+ setIndices(pcl::Indices& indices) = 0;
virtual void
setInputCloud(const PointInTPtr& cloud) = 0;
flann::Index<DistT>* flann_index_;
std::vector<flann_model> flann_models_;
- std::vector<int> indices_;
+ pcl::Indices indices_;
// load features from disk and create flann structure
void
}
void
- setIndices(std::vector<int>& indices) override
+ setIndices(pcl::Indices& indices) override
{
indices_ = indices;
}
poses_cache_;
std::map<std::pair<std::string, int>, Eigen::Vector3f> centroids_cache_;
- std::vector<int> indices_;
+ pcl::Indices indices_;
// load features from disk and create flann structure
void
}
void
- setIndices(std::vector<int>& indices)
+ setIndices(pcl::Indices& indices)
{
indices_ = indices;
}
poses_cache_;
std::map<std::pair<std::string, int>, Eigen::Vector3f> centroids_cache_;
- std::vector<int> indices_;
+ pcl::Indices indices_;
bool compute_scale_;
}
void
- setIndices(std::vector<int>& indices)
+ setIndices(pcl::Indices& indices)
{
indices_ = indices;
}
if (!bf::exists(desc_dir))
bf::create_directory(desc_dir);
- std::stringstream path_view;
- path_view << path << "/view_" << v << ".pcd";
- pcl::io::savePCDFileBinary(path_view.str(), *processed);
-
- std::stringstream path_pose;
- path_pose << path << "/pose_" << v << ".txt";
- PersistenceUtils::writeMatrixToFile(path_pose.str(),
- models->at(i).poses_->at(v));
-
- std::stringstream path_entropy;
- path_entropy << path << "/entropy_" << v << ".txt";
- PersistenceUtils::writeFloatToFile(path_entropy.str(),
+ const std::string path_view = path + "/view_" + std::to_string(v) + ".pcd";
+ pcl::io::savePCDFileBinary(path_view, *processed);
+
+ const std::string path_pose = path + "/pose_" + std::to_string(v) + ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose, models->at(i).poses_->at(v));
+
+ const std::string path_entropy =
+ path + "/entropy_" + std::to_string(v) + ".txt";
+ PersistenceUtils::writeFloatToFile(path_entropy,
models->at(i).self_occlusions_->at(v));
// save signatures and centroids to disk
for (std::size_t j = 0; j < signatures.size(); j++) {
- std::stringstream path_centroid;
- path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
+ const std::string path_centroid = path + "/centroid_" + std::to_string(v) +
+ "_" + std::to_string(j) + ".txt";
Eigen::Vector3f centroid(centroids[j][0], centroids[j][1], centroids[j][2]);
- PersistenceUtils::writeCentroidToFile(path_centroid.str(), centroid);
+ PersistenceUtils::writeCentroidToFile(path_centroid, centroid);
- std::stringstream path_descriptor;
- path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
- pcl::io::savePCDFileBinary(path_descriptor.str(), signatures[j]);
+ const std::string path_descriptor = path + "/descriptor_" +
+ std::to_string(v) + "_" +
+ std::to_string(j) + ".pcd";
+ pcl::io::savePCDFileBinary(path_descriptor, signatures[j]);
}
}
}
}
}
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/pose_" << view_id << ".txt";
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir = path + "/pose_" + std::to_string(view_id) + ".txt";
- PersistenceUtils::readMatrixFromFile(dir.str(), pose_matrix);
+ PersistenceUtils::readMatrixFromFile(dir, pose_matrix);
}
template <template <class> class Distance, typename PointInT, typename FeatureT>
{
hist.reset(new CRHPointCloud);
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/crh_" << view_id << "_" << d_id << ".pcd";
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir = path + "/centroid_" + std::to_string(view_id) + '_' +
+ std::to_string(d_id) + ".pcd";
- pcl::io::loadPCDFile(dir.str(), *hist);
+ pcl::io::loadPCDFile(dir, *hist);
}
template <template <class> class Distance, typename PointInT, typename FeatureT>
pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getCentroid(
ModelT& model, int view_id, int d_id, Eigen::Vector3f& centroid)
{
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/centroid_" << view_id << "_" << d_id << ".txt";
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir = path + "/centroid_" + std::to_string(view_id) + '_' +
+ std::to_string(d_id) + ".txt";
- PersistenceUtils::getCentroidFromFile(dir.str(), centroid);
+ PersistenceUtils::getCentroidFromFile(dir, centroid);
}
template <template <class> class Distance, typename PointInT, typename FeatureT>
ModelT& model, int view_id, PointInTPtr& view)
{
view.reset(new pcl::PointCloud<PointInT>);
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/view_" << view_id << ".pcd";
- pcl::io::loadPCDFile(dir.str(), *view);
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir = path + "/view_" + std::to_string(view_id) + ".pcd";
+ pcl::io::loadPCDFile(dir, *view);
}
template <template <class> class Distance, typename PointInT, typename FeatureT>
if (use_cache_) {
- std::stringstream dir_pose;
- dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
+ const std::string dir_pose =
+ path + "/pose_" + std::to_string(descr_model.view_id) + ".txt";
Eigen::Matrix4f pose_matrix;
- PersistenceUtils::readMatrixFromFile(dir_pose.str(), pose_matrix);
+ PersistenceUtils::readMatrixFromFile(dir_pose, pose_matrix);
std::pair<std::string, int> pair_model_view =
std::make_pair(models->at(i).id_, descr_model.view_id);
if (!bf::exists(desc_dir))
bf::create_directory(desc_dir);
- std::stringstream path_view;
- path_view << path << "/view_" << v << ".pcd";
- pcl::io::savePCDFileBinary(path_view.str(), *processed);
+ const std::string path_view = path + "/view_" + std::to_string(v) + ".pcd";
+ pcl::io::savePCDFileBinary(path_view, *processed);
- std::stringstream path_pose;
- path_pose << path << "/pose_" << v << ".txt";
- PersistenceUtils::writeMatrixToFile(path_pose.str(),
- models->at(i).poses_->at(v));
+ const std::string path_pose = path + "/pose_" + std::to_string(v) + ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose, models->at(i).poses_->at(v));
- std::stringstream path_entropy;
- path_entropy << path << "/entropy_" << v << ".txt";
- PersistenceUtils::writeFloatToFile(path_entropy.str(),
+ const std::string path_entropy =
+ path + "/entropy_" + std::to_string(v) + ".txt";
+ PersistenceUtils::writeFloatToFile(path_entropy,
models->at(i).self_occlusions_->at(v));
std::vector<CRHPointCloud::Ptr> crh_histograms;
// save signatures and centroids to disk
for (std::size_t j = 0; j < signatures.size(); j++) {
- std::stringstream path_centroid;
- path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
+ const std::string path_centroid = path + "/centroid_" + std::to_string(v) +
+ '_' + std::to_string(j) + ".txt";
Eigen::Vector3f centroid(centroids[j][0], centroids[j][1], centroids[j][2]);
- PersistenceUtils::writeCentroidToFile(path_centroid.str(), centroid);
+ PersistenceUtils::writeCentroidToFile(path_centroid, centroid);
- std::stringstream path_descriptor;
- path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
- pcl::io::savePCDFileBinary(path_descriptor.str(), signatures[j]);
+ const std::string path_descriptor = path + "/descriptor_" +
+ std::to_string(v) + '_' +
+ std::to_string(j) + ".pcd";
+ pcl::io::savePCDFileBinary(path_descriptor, signatures[j]);
- std::stringstream path_roll;
- path_roll << path << "/crh_" << v << "_" << j << ".pcd";
- pcl::io::savePCDFileBinary(path_roll.str(), *crh_histograms[j]);
+ const std::string path_roll =
+ path + "/crh_" + std::to_string(v) + '_' + std::to_string(j) + ".pcd";
+ pcl::io::savePCDFileBinary(path_roll, *crh_histograms[j]);
}
}
}
}
}
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/pose_" << view_id << ".txt";
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir = path + "/pose_" + std::to_string(view_id) + ".txt";
- PersistenceUtils::readMatrixFromFile(dir.str(), pose_matrix);
+ PersistenceUtils::readMatrixFromFile(dir, pose_matrix);
}
template <template <class> class Distance, typename PointInT, typename FeatureT>
pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::
getRollPose(ModelT& model, int view_id, int d_id, Eigen::Matrix4f& pose_matrix)
{
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir = path + "/roll_trans_" + std::to_string(view_id) + '_' +
+ std::to_string(d_id) + ".txt";
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
-
- dir << path << "/roll_trans_" << view_id << "_" << d_id << ".txt";
-
- bf::path file_path = dir.str();
+ const bf::path file_path = dir;
if (bf::exists(file_path)) {
- PersistenceUtils::readMatrixFromFile(dir.str(), pose_matrix);
+ PersistenceUtils::readMatrixFromFile(dir, pose_matrix);
return true;
}
return false;
pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::
getCentroid(ModelT& model, int view_id, int d_id, Eigen::Vector3f& centroid)
{
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/centroid_" << view_id << "_" << d_id << ".txt";
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir = path + "/centroid_" + std::to_string(view_id) + '_' +
+ std::to_string(d_id) + ".txt";
- PersistenceUtils::getCentroidFromFile(dir.str(), centroid);
+ PersistenceUtils::getCentroidFromFile(dir, centroid);
}
template <template <class> class Distance, typename PointInT, typename FeatureT>
ModelT& model, int view_id, PointInTPtr& view)
{
view.reset(new pcl::PointCloud<PointInT>);
- std::stringstream dir;
std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/view_" << view_id << ".pcd";
- pcl::io::loadPCDFile(dir.str(), *view);
+ std::string dir = path + "/view_" + std::to_string(view_id) + ".pcd";
+ pcl::io::loadPCDFile(dir, *view);
}
template <template <class> class Distance, typename PointInT, typename FeatureT>
flann_models_.push_back(descr_model);
if (use_cache_) {
-
- std::stringstream dir_pose;
- dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
+ const std::string dir_pose =
+ path + "/pose_" + std::to_string(descr_model.view_id) + ".txt";
Eigen::Matrix4f pose_matrix;
- PersistenceUtils::readMatrixFromFile(dir_pose.str(), pose_matrix);
+ PersistenceUtils::readMatrixFromFile(dir_pose, pose_matrix);
std::pair<std::string, int> pair_model_view =
std::make_pair(models->at(i).id_, descr_model.view_id);
if (!bf::exists(desc_dir))
bf::create_directory(desc_dir);
- std::stringstream path_view;
- path_view << path << "/view_" << v << ".pcd";
- pcl::io::savePCDFileBinary(path_view.str(), *processed);
+ const std::string path_view = path + "/view_" + std::to_string(v) + ".pcd";
+ pcl::io::savePCDFileBinary(path_view, *processed);
- std::stringstream path_pose;
- path_pose << path << "/pose_" << v << ".txt";
- PersistenceUtils::writeMatrixToFile(path_pose.str(),
- models->at(i).poses_->at(v));
+ const std::string path_pose = path + "/pose_" + std::to_string(v) + ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose, models->at(i).poses_->at(v));
- std::stringstream path_entropy;
- path_entropy << path << "/entropy_" << v << ".txt";
- PersistenceUtils::writeFloatToFile(path_entropy.str(),
+ const std::string path_entropy =
+ path + "/entropy_" + std::to_string(v) + ".txt";
+ PersistenceUtils::writeFloatToFile(path_entropy,
models->at(i).self_occlusions_->at(v));
// save signatures and centroids to disk
for (std::size_t j = 0; j < signatures.size(); j++) {
if (valid_trans[j]) {
- std::stringstream path_centroid;
- path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
+ const std::string path_centroid = path + "/centroid_" + std::to_string(v) +
+ '_' + std::to_string(j) + ".txt";
Eigen::Vector3f centroid(centroids[j][0], centroids[j][1], centroids[j][2]);
- PersistenceUtils::writeCentroidToFile(path_centroid.str(), centroid);
+ PersistenceUtils::writeCentroidToFile(path_centroid, centroid);
- std::stringstream path_descriptor;
- path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
- pcl::io::savePCDFileBinary(path_descriptor.str(), signatures[j]);
+ const std::string path_descriptor = path + "/descriptor_" +
+ std::to_string(v) + '_' +
+ std::to_string(j) + ".pcd";
+ pcl::io::savePCDFileBinary(path_descriptor, signatures[j]);
// save roll transform
- std::stringstream path_pose;
- path_pose << path << "/roll_trans_" << v << "_" << j << ".txt";
- PersistenceUtils::writeMatrixToFile(path_pose.str(), transforms[j]);
+ const std::string path_pose = path + "/roll_trans_" + std::to_string(v) +
+ '_' + std::to_string(j) + ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose, transforms[j]);
}
}
}
descr_model.view_id = atoi(strs[1].c_str());
if (use_cache_) {
-
- std::stringstream dir_keypoints;
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- dir_keypoints << path << "/keypoint_indices_" << descr_model.view_id
- << ".pcd";
+ const std::string dir_keypoints = path + "/keypoint_indices_" +
+ std::to_string(descr_model.view_id) +
+ ".pcd";
- std::stringstream dir_pose;
- dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
+ const std::string dir_pose =
+ path + "/pose_" + std::to_string(descr_model.view_id) + ".txt";
Eigen::Matrix4f pose_matrix;
- PersistenceUtils::readMatrixFromFile(dir_pose.str(), pose_matrix);
+ PersistenceUtils::readMatrixFromFile(dir_pose, pose_matrix);
std::pair<std::string, int> pair_model_view =
std::make_pair(models->at(i).id_, descr_model.view_id);
// load keypoints and save them to cache
typename pcl::PointCloud<PointInT>::Ptr keypoints(
new pcl::PointCloud<PointInT>());
- pcl::io::loadPCDFile(dir_keypoints.str(), *keypoints);
+ pcl::io::loadPCDFile(dir_keypoints, *keypoints);
keypoints_cache_[pair_model_view] = keypoints;
}
if (!bf::exists(desc_dir))
bf::create_directory(desc_dir);
- std::stringstream path_view;
- path_view << path << "/view_" << v << ".pcd";
- pcl::io::savePCDFileBinary(path_view.str(), *processed);
+ const std::string path_view = path = "/view_" + std::to_string(v) + ".pcd";
+ pcl::io::savePCDFileBinary(path_view, *processed);
- std::stringstream path_pose;
- path_pose << path << "/pose_" << v << ".txt";
- PersistenceUtils::writeMatrixToFile(path_pose.str(),
- models->at(i).poses_->at(v));
+ const std::string path_pose = path + "/pose_" + std::to_string(v) + ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose, models->at(i).poses_->at(v));
if (v < models->at(i).self_occlusions_->size()) {
- std::stringstream path_entropy;
- path_entropy << path << "/entropy_" << v << ".txt";
- PersistenceUtils::writeFloatToFile(path_entropy.str(),
+ const std::string path_entropy =
+ path + "/entropy_" + std::to_string(v) + ".txt";
+ PersistenceUtils::writeFloatToFile(path_entropy,
models->at(i).self_occlusions_->at(v));
}
// save keypoints and signatures to disk
- std::stringstream keypoints_sstr;
- keypoints_sstr << path << "/keypoint_indices_" << v << ".pcd";
-
- pcl::io::savePCDFileBinary(keypoints_sstr.str(), *keypoints_pointcloud);
+ const std::string path_keypoints =
+ path + "/keypoint_indices_" + std::to_string(v) + ".pcd";
+ pcl::io::savePCDFileBinary(path_keypoints, *keypoints_pointcloud);
- std::stringstream path_descriptor;
- path_descriptor << path << "/descriptor_" << v << ".pcd";
- pcl::io::savePCDFileBinary(path_descriptor.str(), *signatures);
+ const std::string path_descriptor =
+ path + "/descriptor_" + std::to_string(v) + ".pcd";
+ pcl::io::savePCDFileBinary(path_descriptor, *signatures);
}
}
}
}
}
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/pose_" << view_id << ".txt";
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir = path + "/pose_" + std::to_string(view_id) + ".txt";
- PersistenceUtils::readMatrixFromFile(dir.str(), pose_matrix);
+ PersistenceUtils::readMatrixFromFile(dir, pose_matrix);
}
template <template <class> class Distance, typename PointInT, typename FeatureT>
}
}
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
- dir << path << "/keypoint_indices_" << view_id << ".pcd";
+ const std::string path =
+ source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ const std::string dir =
+ path + "/keypoint_indices_" + std::to_string(view_id) + ".pcd";
- pcl::io::loadPCDFile(dir.str(), *keypoints_cloud);
+ pcl::io::loadPCDFile(dir, *keypoints_cloud);
}
#include <pcl/apps/3d_rec_framework/feature_wrapper/local/local_estimator.h>
#include <pcl/apps/3d_rec_framework/pc_source/source.h>
-#include <pcl/common/common.h>
#include <pcl/recognition/cg/correspondence_grouping.h>
#include <pcl/recognition/hv/hypotheses_verification.h>
#include <pcl/visualization/pcl_visualizer.h>
flann::Index<DistT>* flann_index_;
std::vector<flann_model> flann_models_;
- std::vector<int> indices_;
+ pcl::Indices indices_;
bool use_cache_;
std::map<std::pair<std::string, int>,
p_scene.getVector4fMap() =
(*keypoints_pointcloud)[correspondences[kk].index_match].getVector4fMap();
- std::stringstream line_name;
- line_name << "line_" << kk;
-
- vis_corresp_.addLine<pcl::PointXYZ, pcl::PointXYZ>(p_scene, p, line_name.str());
+ const std::string line_name = "line_" + std::to_string(kk);
+ vis_corresp_.addLine<pcl::PointXYZ, pcl::PointXYZ>(p_scene, p, line_name);
}
vis_corresp_.spin();
}
void
- setIndices(std::vector<int>& indices)
+ setIndices(pcl::Indices& indices)
{
indices_ = indices;
}
#include <pcl/common/utils.h> // pcl::utils::ignore
-#include <cmath>
-#include <cstdlib>
-
namespace Metrics {
using ::std::abs;
return strs[strs.size() - 2];
}
-inline bool
-readFloatFromFile(const std::string& dir, std::string file, float& value)
-{
-
- std::vector<std::string> strs;
- boost::split(strs, file, boost::is_any_of("/"));
-
- std::string str;
- for (std::size_t i = 0; i < (strs.size() - 1); i++) {
- str += strs[i] + "/";
- }
-
- std::stringstream matrix_file;
- matrix_file << dir << "/" << str << "entropy_" << getViewId(file) << ".txt";
-
- std::ifstream in;
- in.open(matrix_file.str().c_str(), std::ifstream::in);
-
- char linebuf[1024];
- in.getline(linebuf, 1024);
- value = static_cast<float>(atof(linebuf));
-
- return true;
-}
-
inline bool
readFloatFromFile(const std::string& file, float& value)
{
return true;
}
-inline bool
-readMatrixFromFile(std::string dir, std::string file, Eigen::Matrix4f& matrix)
-{
-
- // get the descriptor name from dir
- std::vector<std::string> path;
- boost::split(path, dir, boost::is_any_of("/"));
-
- std::string dname = path[path.size() - 1];
- std::string file_replaced;
- for (std::size_t i = 0; i < (path.size() - 1); i++) {
- file_replaced += path[i] + "/";
- }
-
- boost::split(path, file, boost::is_any_of("/"));
- std::string id;
-
- for (std::size_t i = 0; i < (path.size() - 1); i++) {
- id += path[i];
- if (i < (path.size() - 1)) {
- id += "/";
- }
- }
-
- boost::split(path, file, boost::is_any_of("/"));
- std::string filename = path[path.size() - 1];
-
- std::stringstream matrix_file;
- matrix_file << file_replaced << id << "/" << dname << "/pose_" << getViewId(file)
- << ".txt";
-
- std::ifstream in;
- in.open(matrix_file.str().c_str(), std::ifstream::in);
-
- char linebuf[1024];
- in.getline(linebuf, 1024);
- std::string line(linebuf);
- std::vector<std::string> strs_2;
- boost::split(strs_2, line, boost::is_any_of(" "));
-
- for (int i = 0; i < 16; i++) {
- matrix(i % 4, i / 4) = static_cast<float>(atof(strs_2[i].c_str()));
- }
-
- return true;
-}
-
inline bool
readMatrixFromFile(const std::string& file, Eigen::Matrix4f& matrix)
{
return true;
}
-
-inline bool
-readMatrixFromFile2(const std::string& file, Eigen::Matrix4f& matrix)
-{
-
- std::ifstream in;
- in.open(file.c_str(), std::ifstream::in);
-
- char linebuf[1024];
- in.getline(linebuf, 1024);
- std::string line(linebuf);
- std::vector<std::string> strs_2;
- boost::split(strs_2, line, boost::is_any_of(" "));
-
- for (int i = 0; i < 16; i++) {
- matrix(i / 4, i % 4) = static_cast<float>(atof(strs_2[i].c_str()));
- }
-
- return true;
-}
-
-template <typename PointInT>
-inline void
-getPointCloudFromFile(std::string dir,
- std::string file,
- typename pcl::PointCloud<PointInT>::Ptr& cloud)
-{
-
- // get the descriptor name from dir
- std::vector<std::string> path;
- boost::split(path, dir, boost::is_any_of("/"));
-
- std::string dname = path[path.size() - 1];
- std::string file_replaced;
- for (std::size_t i = 0; i < (path.size() - 1); i++) {
- file_replaced += path[i] + "/";
- }
-
- boost::split(path, file, boost::is_any_of("/"));
- std::string id;
-
- for (std::size_t i = 0; i < (path.size() - 1); i++) {
- id += path[i];
- if (i < (path.size() - 1)) {
- id += "/";
- }
- }
-
- std::stringstream view_file;
- view_file << file_replaced << id << "/" << dname << "/view_" << getViewId(file)
- << ".pcd";
-
- pcl::io::loadPCDFile(view_file.str(), *cloud);
-}
-
} // namespace PersistenceUtils
} // namespace rec_3d_framework
} // namespace pcl
#pragma once
-#include <pcl/common/common.h>
+#include <pcl/visualization/vtk/pcl_vtk_compatibility.h>
#include <vtkCellArray.h>
#include <vtkPLYReader.h>
double A[3], B[3], C[3];
vtkIdType npts = 0;
- vtkIdType* ptIds = nullptr;
+ vtkCellPtsPtr ptIds = nullptr;
polydata->GetCellPoints(el, npts, ptIds);
if (ptIds == nullptr)
double p1[3], p2[3], p3[3], totalArea = 0;
std::vector<double> cumulativeAreas(cells->GetNumberOfCells(), 0);
std::size_t i = 0;
- vtkIdType npts = 0, *ptIds = nullptr;
+ vtkIdType npts = 0;
+ vtkCellPtsPtr ptIds = nullptr;
+
for (cells->InitTraversal(); cells->GetNextCell(npts, ptIds); i++) {
polydata->GetPoint(ptIds[0], p1);
polydata->GetPoint(ptIds[1], p2);
vis.addPointCloud<OpenNIFrameSource::PointT>(frame, "frame");
for (std::size_t i = 0; i < previous_cluster_size; i++) {
- std::stringstream cluster_name;
- cluster_name << "cluster_" << i;
- vis.removePointCloud(cluster_name.str());
+ std::string cluster_name = "cluster_" + std::to_string(i);
+ vis.removePointCloud(cluster_name);
- cluster_name << "_ply_model_";
- vis.removeShape(cluster_name.str());
+ cluster_name += "_ply_model_";
+ vis.removeShape(cluster_name);
}
for (std::size_t i = 0; i < previous_categories_size; i++) {
- std::stringstream cluster_text;
- cluster_text << "cluster_" << i << "_text";
- vis.removeText3D(cluster_text.str());
+ const std::string cluster_text = "cluster_" + std::to_string(i) + "_text";
+ vis.removeText3D(cluster_text);
}
previous_categories_size = 0;
float dist_ = 0.03f;
for (std::size_t i = 0; i < clusters.size(); i++) {
- std::stringstream cluster_name;
- cluster_name << "cluster_" << i;
+ const std::string cluster_name = "cluster_" + std::to_string(i);
pcl::visualization::PointCloudColorHandlerRandom<pcl::PointXYZ> random_handler(
clusters[i]);
- vis.addPointCloud<pcl::PointXYZ>(clusters[i], random_handler, cluster_name.str());
+ vis.addPointCloud<pcl::PointXYZ>(clusters[i], random_handler, cluster_name);
global.setInputCloud(xyz_points);
global.setIndices(indices[i].indices);
prob_str.precision(1);
prob_str << categories[kk] << " [" << conf[kk] << "]";
- std::stringstream cluster_text;
- cluster_text << "cluster_" << previous_categories_size << "_text";
- vis.addText3D(prob_str.str(), pos, text_scale, 1, 0, 1, cluster_text.str(), 0);
+ const std::string cluster_text =
+ "cluster_" + std::to_string(previous_categories_size) + "_text";
+ vis.addText3D(prob_str.str(), pos, text_scale, 1, 0, 1, cluster_text, 0);
previous_categories_size++;
}
}
#include <pcl/apps/3d_rec_framework/feature_wrapper/local/shot_local_estimator_omp.h>
#include <pcl/apps/3d_rec_framework/pc_source/mesh_source.h>
#include <pcl/apps/3d_rec_framework/pipeline/local_recognizer.h>
+#include <pcl/common/transforms.h> // for transformPointCloud
#include <pcl/console/parse.h>
#include <pcl/recognition/cg/correspondence_grouping.h>
#include <pcl/recognition/cg/geometric_consistency.h>
-#include <pcl/recognition/cg/hough_3d.h>
#include <pcl/recognition/hv/greedy_verification.h>
#include <pcl/recognition/hv/hv_go.h>
#include <pcl/recognition/hv/hv_papazov.h>
if ((std::size_t)scene != i)
continue;
- std::stringstream file;
- file << ply_files_dir.string() << files[i];
+ const std::string file = ply_files_dir.string() + files[i];
typename pcl::PointCloud<PointT>::Ptr scene(new pcl::PointCloud<PointT>());
- pcl::io::loadPCDFile(file.str(), *scene);
+ pcl::io::loadPCDFile(file, *scene);
local.setVoxelSizeICP(0.005f);
local.setInputCloud(scene);
local.recognize();
}
- std::stringstream scene_name;
- scene_name << "Scene " << (i + 1);
+ const std::string scene_name = "Scene " + std::to_string(i + 1);
vis.addPointCloud<PointT>(scene, "scene_cloud");
- vis.addText(scene_name.str(), 1, 30, 24, 1, 0, 0, "scene_text");
+ vis.addText(scene_name, 1, 30, 24, 1, 0, 0, "scene_text");
// visualize results
auto models = local.getModels();
auto transforms = local.getTransforms();
for (std::size_t j = 0; j < models->size(); j++) {
- std::stringstream name;
- name << "cloud_" << j;
+ const std::string name = "cloud_" + std::to_string(j);
ConstPointInTPtr model_cloud = models->at(j).getAssembled(0.0025f);
typename pcl::PointCloud<PointT>::Ptr model_aligned(
pcl::visualization::PointCloudColorHandlerCustom<PointT> random_handler(
model_aligned, r, g, b);
- vis.addPointCloud<PointT>(model_aligned, random_handler, name.str());
+ vis.addPointCloud<PointT>(model_aligned, random_handler, name);
}
vis.spin();
vis.removePointCloud("scene_cloud");
vis.removeShape("scene_text");
for (std::size_t j = 0; j < models->size(); j++) {
- std::stringstream name;
- name << "cloud_" << j;
- vis.removePointCloud(name.str());
+ const std::string name = "cloud_" + std::to_string(j);
+ vis.removePointCloud(name);
}
}
}
local.initialize();
for (std::size_t i = 0; i < files.size(); i++) {
- std::stringstream file;
- file << ply_files_dir.string() << files[i];
+ const std::string file = ply_files_dir.string() + files[i];
typename pcl::PointCloud<PointT>::Ptr scene(new pcl::PointCloud<PointT>());
- pcl::io::loadPCDFile(file.str(), *scene);
+ pcl::io::loadPCDFile(file, *scene);
local.setInputCloud(scene);
local.recognize();
- std::stringstream scene_name;
- scene_name << "Scene " << (i + 1);
+ const std::string scene_name = "Scene " + std::to_string(i + 1);
vis.addPointCloud<PointT>(scene, "scene_cloud");
- vis.addText(scene_name.str(), 1, 30, 24, 1, 0, 0, "scene_text");
+ vis.addText(scene_name, 1, 30, 24, 1, 0, 0, "scene_text");
// visualize results
auto models = local.getModels();
auto transforms = local.getTransforms();
for (std::size_t j = 0; j < models->size(); j++) {
- std::stringstream name;
- name << "cloud_" << j;
+ const std::string name = "cloud_" + std::to_string(j);
ConstPointInTPtr model_cloud = models->at(j).getAssembled(0.0025f);
typename pcl::PointCloud<PointT>::Ptr model_aligned(
pcl::visualization::PointCloudColorHandlerRandom<PointT> random_handler(
model_aligned);
- vis.addPointCloud<PointT>(model_aligned, random_handler, name.str());
+ vis.addPointCloud<PointT>(model_aligned, random_handler, name);
}
vis.spin();
vis.removePointCloud("scene_cloud");
vis.removeShape("scene_text");
for (std::size_t j = 0; j < models->size(); j++) {
- std::stringstream name;
- name << "cloud_" << j;
- vis.removePointCloud(name.str());
+ const std::string name = "cloud_" + std::to_string(j);
+ vis.removePointCloud(name);
}
}
}
set(SUBSYS_NAME apps)
set(SUBSYS_DESC "Application examples/samples that show how PCL works")
set(SUBSYS_DEPS common geometry io filters sample_consensus segmentation visualization kdtree features surface octree registration keypoints tracking search recognition ml stereo 2d)
+set(SUBSYS_OPT_DEPS openni vtk Qt5)
set(DEFAULT FALSE)
set(REASON "Disabled by default")
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
-PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS openni vtk)
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${SUBSYS_OPT_DEPS})
if(NOT build)
return()
endif()
+# Enable CMake's Qt auto features.
+set(CMAKE_AUTOMOC ON)
+set(CMAKE_AUTORCC ON)
+set(CMAKE_AUTOUIC ON)
+
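With these three auto features switched on, the explicit QT5_WRAP_UI/QT5_WRAP_CPP/QT5_ADD_RESOURCES calls that the rest of this patch removes become unnecessary: CMake runs moc, uic and rcc automatically for the sources attached to each target. As a rough stand-alone illustration (not part of this patch; the `demo_viewer` target and its file names are hypothetical):

```
# Hypothetical stand-alone CMakeLists.txt sketch; demo_viewer and its files
# are illustrative names only, not part of this patch.
cmake_minimum_required(VERSION 3.10)
project(demo_viewer_example CXX)

find_package(Qt5 COMPONENTS Widgets REQUIRED)

set(CMAKE_AUTOMOC ON)   # run moc on headers/sources containing Q_OBJECT
set(CMAKE_AUTOUIC ON)   # run uic on .ui files listed as target sources
set(CMAKE_AUTORCC ON)   # run rcc on .qrc files listed as target sources

add_executable(demo_viewer
  demo_viewer.h      # Q_OBJECT header; the moc output is added automatically
  demo_viewer.cpp    # may #include "ui_demo_viewer.h" generated by AUTOUIC
  demo_viewer.ui
  resources.qrc)

target_link_libraries(demo_viewer Qt5::Widgets)
```

Listing the .ui and .qrc files directly as target sources is what makes the removal of the wrapper macros possible below; where the .ui files live in a different directory, CMAKE_AUTOUIC_SEARCH_PATHS is extended accordingly, as done later in this patch.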
# to be filled with all targets of the apps subsystem
set(PCL_APPS_ALL_TARGETS)
PCL_ADD_EXECUTABLE(pcl_statistical_multiscale_interest_region_extraction_example COMPONENT ${SUBSYS_NAME} SOURCES src/statistical_multiscale_interest_region_extraction_example.cpp)
target_link_libraries(pcl_statistical_multiscale_interest_region_extraction_example pcl_common pcl_io pcl_features pcl_filters)
-if(LIBUSB_1_FOUND)
+if(LIBUSB_FOUND)
PCL_ADD_EXECUTABLE(pcl_dinast_grabber COMPONENT ${SUBSYS_NAME} SOURCES src/dinast_grabber_example.cpp)
target_link_libraries(pcl_dinast_grabber pcl_common pcl_visualization pcl_io)
endif()
if(VTK_FOUND)
- include("${VTK_USE_FILE}")
-
set(incs "include/pcl/${SUBSYS_NAME}/render_views_tesselated_sphere.h")
set(srcs "src/render_views_tesselated_sphere.cpp")
if(QHULL_FOUND)
PCL_ADD_EXECUTABLE(pcl_pcd_select_object_plane COMPONENT ${SUBSYS_NAME} SOURCES src/pcd_select_object_plane.cpp)
target_link_libraries(pcl_pcd_select_object_plane pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_features pcl_surface)
+ #TODO: Update when CMAKE 3.10 is available
+ if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries(pcl_pcd_select_object_plane VTK::FiltersGeometry)
+ endif()
endif()
PCL_ADD_EXECUTABLE(pcl_pcd_organized_edge_detection COMPONENT ${SUBSYS_NAME} SOURCES src/pcd_organized_edge_detection.cpp)
PCL_ADD_EXECUTABLE(pcl_stereo_ground_segmentation COMPONENT ${SUBSYS_NAME} SOURCES src/stereo_ground_segmentation.cpp)
target_link_libraries(pcl_stereo_ground_segmentation pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_features pcl_stereo)
- if(Qt5_FOUND AND QVTK_FOUND)
+ if(Qt5_FOUND AND HAVE_QVTK)
# Manual registration demo
- QT5_WRAP_UI(manual_registration_ui src/manual_registration/manual_registration.ui)
- QT5_WRAP_CPP(manual_registration_moc include/pcl/apps/manual_registration.h OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
- PCL_ADD_EXECUTABLE(pcl_manual_registration COMPONENT ${SUBSYS_NAME} SOURCES ${manual_registration_ui} ${manual_registration_moc} src/manual_registration/manual_registration.cpp BUNDLE)
+ PCL_ADD_EXECUTABLE(pcl_manual_registration
+ COMPONENT
+ ${SUBSYS_NAME}
+ SOURCES
+ include/pcl/apps/manual_registration.h
+ src/manual_registration/manual_registration.cpp
+ src/manual_registration/manual_registration.ui
+ BUNDLE)
+
target_link_libraries(pcl_manual_registration pcl_common pcl_io pcl_visualization pcl_segmentation pcl_features pcl_surface Qt5::Widgets)
+ #TODO: Update when CMAKE 3.10 is available
+ if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries(pcl_manual_registration VTK::GUISupportQt)
+ endif()
- QT5_WRAP_UI(pcd_video_player_ui src/pcd_video_player/pcd_video_player.ui)
- QT5_WRAP_CPP(pcd_video_player_moc include/pcl/apps/pcd_video_player.h OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
- PCL_ADD_EXECUTABLE(pcl_pcd_video_player COMPONENT ${SUBSYS_NAME} SOURCES ${pcd_video_player_ui} ${pcd_video_player_moc} src/pcd_video_player/pcd_video_player.cpp BUNDLE)
+ PCL_ADD_EXECUTABLE(pcl_pcd_video_player
+ COMPONENT
+ ${SUBSYS_NAME}
+ SOURCES
+ include/pcl/apps/pcd_video_player.h
+ src/pcd_video_player/pcd_video_player.cpp
+ src/pcd_video_player/pcd_video_player.ui
+ BUNDLE)
+
target_link_libraries(pcl_pcd_video_player pcl_common pcl_io pcl_visualization pcl_segmentation pcl_features pcl_surface Qt5::Widgets)
+ #TODO: Update when CMAKE 3.10 is available
+ if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries(pcl_pcd_video_player VTK::GUISupportQt)
+ endif()
endif()
if(WITH_OPENNI)
PCL_ADD_EXECUTABLE(pcl_openni_face_detector COMPONENT ${SUBSYS_NAME} SOURCES src/face_detection/openni_face_detection.cpp src/face_detection/openni_frame_source.cpp BUNDLE)
target_link_libraries(pcl_openni_face_detector pcl_features pcl_recognition pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_surface pcl_keypoints pcl_ml pcl_search pcl_kdtree ${VTK_LIBRARIES})
- if(Qt5_FOUND AND QVTK_FOUND)
- # OpenNI Passthrough application demo
- QT5_WRAP_UI(openni_passthrough_ui src/openni_passthrough.ui)
- QT5_WRAP_CPP(openni_passthrough_moc include/pcl/apps/openni_passthrough.h OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
- PCL_ADD_EXECUTABLE(pcl_openni_passthrough COMPONENT ${SUBSYS_NAME} SOURCES ${openni_passthrough_ui} ${openni_passthrough_moc} src/openni_passthrough.cpp)
- target_link_libraries(pcl_openni_passthrough pcl_common pcl_io pcl_filters pcl_visualization Qt5::Widgets)
-
- # OpenNI Organized Connected Component application demo
- QT5_WRAP_UI(organized_segmentation_demo_ui src/organized_segmentation_demo.ui)
- QT5_WRAP_CPP(organized_segmentation_demo_moc include/pcl/apps/organized_segmentation_demo.h OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
- PCL_ADD_EXECUTABLE(pcl_organized_segmentation_demo COMPONENT ${SUBSYS_NAME} SOURCES ${organized_segmentation_demo_ui} ${organized_segmentation_demo_moc} src/organized_segmentation_demo.cpp BUNDLE)
- target_link_libraries(pcl_organized_segmentation_demo pcl_common pcl_io pcl_visualization pcl_segmentation pcl_features pcl_surface Qt5::Widgets)
+ if(Qt5_FOUND AND HAVE_QVTK)
+ # OpenNI Passthrough application demo
+ PCL_ADD_EXECUTABLE(pcl_openni_passthrough
+ COMPONENT
+ ${SUBSYS_NAME}
+ SOURCES
+ include/pcl/apps/openni_passthrough.h
+ src/openni_passthrough.cpp
+ src/openni_passthrough.ui)
+
+ target_link_libraries(pcl_openni_passthrough pcl_common pcl_io pcl_filters pcl_visualization Qt5::Widgets)
+ #TODO: Update when CMAKE 3.10 is available
+ if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries(pcl_openni_passthrough VTK::GUISupportQt)
+ endif()
+
+ list(APPEND CMAKE_AUTOUIC_SEARCH_PATHS "src")
+
+ # OpenNI Organized Connected Component application demo
+ PCL_ADD_EXECUTABLE(pcl_organized_segmentation_demo
+ COMPONENT
+ ${SUBSYS_NAME}
+ SOURCES
+ include/pcl/apps/organized_segmentation_demo_qt.h
+ include/pcl/apps/organized_segmentation_demo.h
+ src/organized_segmentation_demo.cpp
+ src/organized_segmentation_demo.ui
+ BUNDLE)
+ target_link_libraries(pcl_organized_segmentation_demo pcl_common pcl_io pcl_visualization pcl_segmentation pcl_features pcl_surface Qt5::Widgets)
+ #TODO: Update when CMAKE 3.10 is available
+ if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries(pcl_organized_segmentation_demo VTK::GUISupportQt)
+ endif()
endif()
if(QHULL_FOUND)
set(SUBSUBSYS_NAME cloud_composer)
set(SUBSUBSYS_DESC "Cloud Composer - Application for Manipulating Point Clouds")
set(SUBSUBSYS_DEPS common io visualization filters apps)
-
-# Find VTK
-if(NOT VTK_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "VTK was not found.")
-else()
- set(DEFAULT TRUE)
- set(REASON)
- include("${VTK_USE_FILE}")
-endif()
-
-# QT5 Found?
-if(NOT Qt5_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "Qt5 was not found.")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
-endif()
+set(SUBSUBSYS_EXT_DEPS vtk Qt5)
# QVTK?
-if(NOT QVTK_FOUND)
+if(NOT HAVE_QVTK)
set(DEFAULT AUTO_OFF)
set(REASON "Cloud composer requires QVTK")
elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
endif()
PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSUBSYS_DESC}" ${DEFAULT} "${REASON}")
-PCL_SUBSUBSYS_DEPEND(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSUBSYS_DEPS})
+PCL_SUBSUBSYS_DEPEND(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSUBSYS_DEPS} EXT_DEPS ${SUBSUBSYS_EXT_DEPS})
PCL_ADD_DOC(${SUBSUBSYS_NAME})
# Build pcl_cc_tool_interface as static library, to fix issue mentioned in #2708
set(PCL_LIB_TYPE_ORIGIN ${PCL_LIB_TYPE})
set(PCL_LIB_TYPE STATIC)
-QT5_WRAP_CPP(INTERFACE_HEADERS_MOC ${INTERFACE_HEADERS} OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
-PCL_ADD_LIBRARY(pcl_cc_tool_interface COMPONENT ${SUBSUBSYS_NAME} SOURCES ${INTERFACE_HEADERS} ${INTERFACE_SOURCES} ${INTERFACE_HEADERS_MOC})
-target_link_libraries(pcl_cc_tool_interface pcl_common pcl_filters pcl_search pcl_visualization ${VTK_LIBRARIES} Qt5::Widgets)
+
+PCL_ADD_LIBRARY(pcl_cc_tool_interface COMPONENT ${SUBSUBSYS_NAME} SOURCES ${INTERFACE_HEADERS} ${INTERFACE_SOURCES})
+
+set(vtk_libs ${VTK_LIBRARIES})
+#TODO: Update when CMAKE 3.10 is available
+if (NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ set(vtk_libs VTK::GUISupportQt)
+endif()
+target_link_libraries(pcl_cc_tool_interface pcl_common pcl_filters pcl_search pcl_visualization Qt5::Widgets ${vtk_libs})
+
set(PCL_LIB_TYPE ${PCL_LIB_TYPE_ORIGIN})
if(APPLE)
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/item_inspector.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/merge_selection.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/project_model.h"
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/properties_model.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/signal_multiplexer.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/toolbox_model.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/transform_clouds.h"
set(uis src/cloud_composer_main_window.ui)
set(resources resources/resources.qrc)
-QT5_WRAP_UI(cloud_composer_ui ${uis})
-QT5_WRAP_CPP(cloud_composer_moc ${incs} OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
-QT5_ADD_RESOURCES(resource_srcs ${resources})
+list(APPEND CMAKE_AUTOUIC_SEARCH_PATHS "src")
set(EXE_NAME "pcl_${SUBSUBSYS_NAME}")
-PCL_ADD_EXECUTABLE(${EXE_NAME} COMPONENT ${SUBSUBSYS_NAME} SOURCES ${cloud_composer_ui} ${cloud_composer_moc} ${srcs} ${resource_srcs} ${impl_incs})
+PCL_ADD_EXECUTABLE(${EXE_NAME} COMPONENT ${SUBSUBSYS_NAME} SOURCES ${uis} ${incs} ${srcs} ${resources} ${impl_incs})
target_link_libraries("${EXE_NAME}" pcl_cc_tool_interface pcl_common pcl_io pcl_visualization pcl_filters Qt5::Widgets)
+
+
# Install include files
PCL_ADD_INCLUDES("${SUBSUBSYS_NAME}" "${SUBSUBSYS_NAME}" ${incs} ${item_incs} ${selector_incs})
PCL_ADD_INCLUDES("${SUBSUBSYS_NAME}" "${SUBSUBSYS_NAME}/impl" ${impl_incs})
project(pcl_cc_tool_${TOOL_NAME})
- #message("Making plugin " pcl_cc_tool_${TOOL_NAME})
- QT5_WRAP_CPP(TOOL_HEADERS_MOC ${TOOL_HEADERS} OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
set(TOOL_TARGET pcl_cc_tool_${TOOL_NAME})
- # message("Files:" ${TOOL_SOURCES} ${TOOL_HEADERS_MOC})
- PCL_ADD_LIBRARY(${TOOL_TARGET} COMPONENT ${SUBSYS_NAME} SOURCES ${TOOL_SOURCES} ${TOOL_HEADERS} ${TOOL_HEADERS_MOC})
+ PCL_ADD_LIBRARY(${TOOL_TARGET} COMPONENT ${SUBSYS_NAME} SOURCES ${TOOL_SOURCES} ${TOOL_HEADERS})
+
if(WIN32)
set_target_properties (${TOOL_TARGET} PROPERTIES RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CLOUD_COMPOSER_PLUGIN_DIR}
RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CLOUD_COMPOSER_PLUGIN_DIR})
else()
set_target_properties (${TOOL_TARGET} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CLOUD_COMPOSER_PLUGIN_DIR})
endif()
+
add_definitions(${QT_DEFINITIONS})
add_definitions(-DQT_PLUGIN)
- add_definitions(-DQT_NO_DEBUG)
add_definitions(-DQT_SHARED)
target_link_libraries(${TOOL_TARGET} pcl_cc_tool_interface pcl_common pcl_io ${DEPS} Qt5::Widgets)
#include <pcl/point_types.h>
#include <ui_cloud_composer_main_window.h>
+
class QTreeView;
namespace pcl
#include <vtkEventQtSlotConnect.h>
#include <pcl/visualization/pcl_visualizer.h>
+#include <pcl/visualization/qvtk_compatibility.h>
#include <pcl/apps/cloud_composer/point_selectors/interactor_style_switch.h>
+
+#include <vtkSmartPointer.h>
+#include <vtkOrientationMarkerWidget.h>
+#include <vtkAxesActor.h>
+#include <vtkVersion.h>
+
class QItemSelection;
class QStandardItem;
void
setModel (ProjectModel* new_model);
+
ProjectModel*
getModel () const { return model_; }
- QVTKWidget*
- getQVTK() const {return qvtk_; }
+ PCLQVTKWidget*
+ getQVTK() const { return qvtk_; }
pcl::visualization::PCLVisualizer::Ptr
getPCLVisualizer () const { return vis_; }
pcl::visualization::PCLVisualizer::Ptr vis_;
ProjectModel* model_;
- QVTKWidget* qvtk_;
+
+ PCLQVTKWidget* qvtk_;
+
vtkSmartPointer<InteractorStyleSwitch> style_switch_;
vtkSmartPointer<vtkOrientationMarkerWidget> axes_widget_;
#include <QDebug>
#include <pcl/apps/cloud_composer/items/cloud_composer_item.h>
+
+#include <pcl/point_types.h>
+#include <pcl/point_cloud.h>
+#include <pcl/PCLPointCloud2.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/search/kdtree.h>
+#include <pcl/visualization/point_cloud_geometry_handlers.h>
+#include <pcl/visualization/point_cloud_color_handlers.h>
+#include <pcl/visualization/pcl_visualizer.h>
+
//Typedefs to make things sane
using GeometryHandler = pcl::visualization::PointCloudGeometryHandler<pcl::PCLPointCloud2>;
#include <pcl/features/fpfh.h>
#include <pcl/apps/cloud_composer/items/cloud_composer_item.h>
+#include <pcl/visualization/qvtk_compatibility.h>
#include <pcl/visualization/pcl_plotter.h>
-class QVTKWidget;
-
namespace pcl
{
namespace cloud_composer
pcl::PointCloud<pcl::FPFHSignature33>::Ptr fpfh_ptr_;
double radius_;
pcl::visualization::PCLPlotter::Ptr plot_;
- QVTKWidget *qvtk_;
+ PCLQVTKWidget* qvtk_;
QWidget *hist_page_;
};
#include <pcl/apps/cloud_composer/point_selectors/interactor_style_switch.h>
+#include <vtkInteractorStyleTrackballActor.h>
+
namespace pcl
{
namespace cloud_composer
#include <QMap>
-#include <pcl/visualization/vtk.h>
#include <pcl/visualization/interactor_style.h>
#include <pcl/visualization/common/actor_map.h>
#include <pcl/visualization/common/ren_win_interact_map.h>
#include <pcl/visualization/pcl_visualizer.h>
+#include <vtkSmartPointer.h>
+#include <vtkAreaPicker.h>
+#include <vtkPointPicker.h>
+#include <vtkRenderWindowInteractor.h>
+#include <vtkCommand.h>
+#include <vtkRendererCollection.h>
+#include <vtkInteractorStyle.h>
+
class QVTKWidget;
namespace pcl
#pragma once
-#include <pcl/visualization/vtk.h>
#include <pcl/apps/cloud_composer/items/cloud_item.h>
namespace pcl
#include <pcl/apps/cloud_composer/point_selectors/interactor_style_switch.h>
+#include <vtkSmartPointer.h>
+#include <vtkRendererCollection.h>
+#include <vtkInteractorStyleRubberBandPick.h>
+
namespace pcl
{
namespace cloud_composer
#include <pcl/apps/cloud_composer/point_selectors/interactor_style_switch.h>
+#include <vtkInteractorStyleTrackballActor.h>
+#include <vtkSmartPointer.h>
+#include <vtkMatrix4x4.h>
+
namespace pcl
{
namespace cloud_composer
#pragma once
-#include <pcl/visualization/vtk.h>
#include <pcl/apps/cloud_composer/items/cloud_item.h>
+#include <vtkSmartPointer.h>
+#include <vtkPolyData.h>
+#include <vtkActor.h>
+#include <vtkDataSetMapper.h>
+#include <vtkRenderer.h>
+
namespace pcl
{
namespace cloud_composer
#include <QStandardItemModel>
-#include <pcl/common/boost.h>
-
namespace pcl
{
namespace cloud_composer
euclidean_segmentation.setInputCloud (input_cloud);
euclidean_segmentation.segment (euclidean_labels, euclidean_label_indices);
- pcl::IndicesPtr extracted_indices (new std::vector<int> ());
+ pcl::IndicesPtr extracted_indices (new pcl::Indices ());
for (std::size_t i = 0; i < euclidean_label_indices.size (); i++)
{
if (euclidean_label_indices[i].indices.size () >= (std::size_t) min_cluster_size)
#include <pcl/apps/cloud_composer/point_selectors/selection_event.h>
#include <pcl/apps/cloud_composer/point_selectors/manipulation_event.h>
+#include <vtkGenericOpenGLRenderWindow.h>
+
#include <QDebug>
-#include <QVTKWidget.h>
pcl::cloud_composer::CloudView::CloudView (QWidget* parent)
: QWidget (parent)
{
- vis_.reset (new pcl::visualization::PCLVisualizer ("", false));
- vis_->getInteractorStyle ()->setKeyboardModifier (pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
 //Create the QVTKWidget
+ qvtk_ = new PCLQVTKWidget(this);
- qvtk_ = new QVTKWidget (this);
- qvtk_->SetRenderWindow (vis_->getRenderWindow ());
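+ // With VTK 9 the render window is not created by the visualizer, so build the renderer and vtkGenericOpenGLRenderWindow explicitly and hand them to PCLVisualizer.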
+#if VTK_MAJOR_VERSION > 8
+ auto renderer = vtkSmartPointer<vtkRenderer>::New();
+ auto renderWindow = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
+ renderWindow->AddRenderer(renderer);
+ vis_.reset(new pcl::visualization::PCLVisualizer(renderer, renderWindow, "", false));
+#else
+ vis_.reset(new pcl::visualization::PCLVisualizer("", false));
+#endif // VTK_MAJOR_VERSION > 8
+ setRenderWindowCompat(*qvtk_, *(vis_->getRenderWindow()));
+ vis_->setupInteractor(getInteractorCompat(*qvtk_), getRenderWindowCompat(*qvtk_), style_switch_);
+ vis_->getInteractorStyle()->setKeyboardModifier(pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
+
initializeInteractorSwitch ();
- vis_->setupInteractor (qvtk_->GetInteractor (), qvtk_->GetRenderWindow (), style_switch_);
QGridLayout *mainLayout = new QGridLayout (this);
mainLayout-> addWidget (qvtk_,0,0);
: QWidget (parent)
{
model_ = model;
- vis_.reset (new pcl::visualization::PCLVisualizer ("", false));
- // vis_->getInteractorStyle ()->setKeyboardModifier (pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
+
 //Create the QVTKWidget
+ qvtk_ = new PCLQVTKWidget(this);
- qvtk_ = new QVTKWidget (this);
- qvtk_->SetRenderWindow (vis_->getRenderWindow ());
+#if VTK_MAJOR_VERSION > 8
+ auto renderer = vtkSmartPointer<vtkRenderer>::New();
+ auto renderWindow = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
+ renderWindow->AddRenderer(renderer);
+ vis_.reset(new pcl::visualization::PCLVisualizer(renderer, renderWindow, "", false));
+#else
+ vis_.reset(new pcl::visualization::PCLVisualizer("", false));
+#endif // VTK_MAJOR_VERSION > 8
+ setRenderWindowCompat(*qvtk_, *(vis_->getRenderWindow()));
+ vis_->setupInteractor(getInteractorCompat(*qvtk_), getRenderWindowCompat(*qvtk_), style_switch_);
+ //vis_->getInteractorStyle()->setKeyboardModifier(pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
+
initializeInteractorSwitch ();
- vis_->setupInteractor (qvtk_->GetInteractor (), qvtk_->GetRenderWindow (), style_switch_);
setModel(model);
QGridLayout *mainLayout = new QGridLayout (this);
connectSignalsAndSlots();
//Refresh the view
qvtk_->show();
- qvtk_->update ();
+ refresh();
- // vis_->addOrientationMarkerWidgetAxes (qvtk_->GetInteractor ());
+ // vis_->addOrientationMarkerWidgetAxes (getInteractorCompat(*qvtk_));
}
void
void
pcl::cloud_composer::CloudView::refresh ()
{
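+ // Ask the widget's render window to redraw on VTK 9; QVTKWidget::update() covered this on older VTK.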
+#if VTK_MAJOR_VERSION > 8
+ qvtk_->renderWindow()->Render();
+#else
qvtk_->update ();
+#endif // VTK_MAJOR_VERSION > 8
}
void
{
item->paintView (vis_);
}
- qvtk_->update ();
+ refresh();
}
rowsInserted(new_item->index(),0,new_item->rowCount ()-1);
}
- qvtk_->update ();
-
+ refresh();
}
void
if (item_to_remove->rowCount () > 0)
rowsAboutToBeRemoved(item_to_remove->index(),0,item_to_remove->rowCount ()-1);
}
- qvtk_->update ();
+ refresh();
}
void
pcl::cloud_composer::CloudView::paintEvent (QPaintEvent*)
{
- qvtk_->update ();
+ refresh();
}
void
pcl::cloud_composer::CloudView::resizeEvent (QResizeEvent*)
{
- qvtk_->update ();
+ refresh();
}
void
}
}
}
- qvtk_->update ();
+ refresh();
}
void
if (visible)
{
qDebug () << "Adding coordinate system!";
- vis_->addOrientationMarkerWidgetAxes ( qvtk_->GetInteractor() );
+ vis_->addOrientationMarkerWidgetAxes(getInteractorCompat(*qvtk_));
}
else
{
vis_->removeOrientationMarkerWidgetAxes ();
}
- qvtk_->update ();
+ refresh();
}
void
axes_widget_ = vtkSmartPointer<vtkOrientationMarkerWidget>::New ();
axes_widget_->SetOutlineColor ( 0.9300, 0.5700, 0.1300 );
axes_widget_->SetOrientationMarker( axes );
- axes_widget_->SetInteractor( qvtk_->GetInteractor () );
+ axes_widget_->SetInteractor(getInteractorCompat(*qvtk_));
axes_widget_->SetViewport( 0.0, 0.0, 0.4, 0.4 );
axes_widget_->SetEnabled( 1 );
axes_widget_->InteractiveOn();
{
axes_widget_->SetEnabled (false);
}
-
-
}
//////// Interactor Functions
{
style_switch_ = vtkSmartPointer<InteractorStyleSwitch>::New();
style_switch_->initializeInteractorStyles (vis_, model_);
- style_switch_->SetInteractor (qvtk_->GetInteractor ());
+ style_switch_->SetInteractor(getInteractorCompat(*qvtk_));
style_switch_->setCurrentInteractorStyle (interactor_styles::PCL_VISUALIZER);
//Connect the events!
#include <QGridLayout>
-#include <QVTKWidget.h>
-
pcl::cloud_composer::FPFHItem::FPFHItem (QString name, const pcl::PointCloud<pcl::FPFHSignature33>::Ptr& fpfh_ptr, double radius)
: CloudComposerItem (std::move(name))
, fpfh_ptr_ (fpfh_ptr)
if (!plot_)
{
plot_.reset (new pcl::visualization::PCLPlotter);
- qvtk_ = new QVTKWidget ();
+ qvtk_ = new PCLQVTKWidget();
hist_page_ = new QWidget ();
QGridLayout *mainLayout = new QGridLayout (hist_page_);
mainLayout-> addWidget (qvtk_,0,0);
//Plot the histogram
plot_->addFeatureHistogram (*fpfh_ptr_, fpfh_ptr_->width, data(ItemDataRole::ITEM_ID).toString().toStdString ());
//Set the render window of the QVTK widget, update
- plot_->setViewInteractor (vtkSmartPointer<vtkRenderWindowInteractor> (qvtk_->GetInteractor ()));
- qvtk_->SetRenderWindow (plot_->getRenderWindow ());
+ plot_->setViewInteractor(getInteractorCompat(*qvtk_));
+ setRenderWindowCompat(*qvtk_, *(plot_->getRenderWindow()));
+#if VTK_MAJOR_VERSION > 8
+ qvtk_->renderWindow()->Render();
+#else
+ qvtk_->update();
+#endif // VTK_MAJOR_VERSION > 8
qvtk_->show ();
- qvtk_->update ();
+
QMap <QString, QWidget*> tabs;
tabs.insert ("Histogram",hist_page_);
#include <QDebug>
+
+#include <vtkObjectFactory.h> // For vtkStandardNewMacro
+#include <vtkTransform.h>
+
namespace pcl
{
namespace cloud_composer
#include <QDebug>
+#include <vtkCallbackCommand.h>
+#include <vtkObjectFactory.h>
+
namespace pcl
{
namespace cloud_composer
#include <QDebug>
+#include <vtkSmartPointer.h>
+#include <vtkIdFilter.h>
+#include <vtkExtractGeometry.h>
+#include <vtkVertexGlyphFilter.h>
+#include <vtkPlanes.h>
+#include <vtkAreaPicker.h>
+#include <vtkObjectFactory.h>
+
namespace pcl
{
namespace cloud_composer
#include <QDebug>
#include <QItemSelectionModel>
+#include <vtkSmartPointer.h>
+#include <vtkMatrix4x4.h>
+#include <vtkLODActor.h>
+#include <vtkInteractorStyleTrackballActor.h>
+#include <vtkRenderWindowInteractor.h>
+#include <vtkTransform.h>
+#include <vtkObjectFactory.h>
+
+
namespace pcl
{
namespace cloud_composer
#include <QMessageBox>
#include <QThread>
+#include <vtkSmartPointer.h>
+#include <vtkImageData.h>
+#include <vtkImageReader2Factory.h>
+#include <vtkImageReader2.h>
+
pcl::cloud_composer::ProjectModel::ProjectModel (QObject* parent)
: QStandardItemModel (parent)
{
#include <pcl/apps/cloud_composer/transform_clouds.h>
#include <pcl/apps/cloud_composer/items/cloud_item.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/point_types.h>
#include <pcl/apps/cloud_composer/impl/transform_clouds.hpp>
Eigen::Vector4f source_origin = input_item->data (ItemDataRole::ORIGIN).value<Eigen::Vector4f> ();
Eigen::Quaternionf source_orientation = input_item->data (ItemDataRole::ORIENTATION).value<Eigen::Quaternionf> ();
//Vector to accumulate the extracted indices
- pcl::IndicesPtr extracted_indices (new std::vector<int> ());
+ pcl::IndicesPtr extracted_indices (new pcl::Indices ());
//Put found clusters into new cloud_items!
qDebug () << "Found "<<cluster_indices.size ()<<" clusters!";
int cluster_count = 0;
set(SUBSUBSYS_DESC "In-hand scanner for small objects")
set(SUBSUBSYS_DEPS common features io kdtree apps)
set(SUBSUBSYS_LIBS pcl_common pcl_features pcl_io pcl_kdtree)
+set(SUBSUBSYS_EXT_DEPS Qt5 OpenGL OpenGL_GLU openni)
################################################################################
-# QT5 Found?
-if(NOT Qt5_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "Qt5 was not found.")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
-endif()
-
-# OpenGL
-if(NOT OPENGL_FOUND AND NOT OPENGL_GLU_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "OpenGL & GLU are required for the in_hand_scanner app!")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
-endif()
-
-#OpenNI
-if(NOT WITH_OPENNI)
- set(DEFAULT AUTO_OFF)
- set(REASON "OpenNI was not found or was disabled by the user.")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
-endif()
# Default to not building for now
if(${DEFAULT} STREQUAL "TRUE")
endif()
pcl_subsubsys_option(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
-pcl_subsubsys_depend(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSYS_DEPS} EXT_DEPS Qt5 OpenGL OpenGL_GLU openni)
+pcl_subsubsys_depend(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSYS_DEPS} EXT_DEPS ${SUBSUBSYS_EXT_DEPS})
pcl_add_doc("${SUBSUBSYS_NAME}")
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/mesh_processing.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/utils.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/visibility_confidence.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/main_window.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/help_window.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/opengl_viewer.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/in_hand_scanner.h"
)
set(IMPL_INCS
src/visibility_confidence.cpp
)
-# Qt
-set(MOC_IN_HAND_SCANNER_INC "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/in_hand_scanner.h")
-set(MOC_OPENGL_VIEWER_INC "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/opengl_viewer.h")
-set(MOC_OFFLINE_INTEGRATION_INC "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/offline_integration.h")
-
-set(MOC_MAIN_WINDOW_INC "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/main_window.h")
-set(MOC_HELP_WINDOW_INC "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/help_window.h")
-set(UI_MAIN_WINDOW src/main_window.ui)
-set(UI_HELP_WINDOW src/help_window.ui)
+set(UI
+ src/main_window.ui
+ src/help_window.ui)
# Offline integration
set(OI_INCS
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/integration.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/visibility_confidence.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/opengl_viewer.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/offline_integration.h"
)
set(OI_SRCS
return()
endif()
-qt5_wrap_cpp(MOC_IN_HAND_SCANNER_SRC "${MOC_IN_HAND_SCANNER_INC}")
-qt5_wrap_cpp(MOC_OPENGL_VIEWER_SRC "${MOC_OPENGL_VIEWER_INC}" OPTIONS -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
-qt5_wrap_cpp(MOC_OFFLINE_INTEGRATION_SRC "${MOC_OFFLINE_INTEGRATION_INC}" OPTIONS -DBOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
-
-qt5_wrap_cpp(MOC_MAIN_WINDOW_SRC "${MOC_MAIN_WINDOW_INC}")
-qt5_wrap_cpp(MOC_HELP_WINDOW_SRC "${MOC_HELP_WINDOW_INC}")
-qt5_wrap_ui(UI_MAIN_WINDOW_INC "${UI_MAIN_WINDOW}")
-qt5_wrap_ui(UI_HELP_WINDOW_INC "${UI_HELP_WINDOW}")
-
include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/include")
-# In-hand scanner
-list(APPEND INCS "${MOC_IN_HAND_SCANNER_INC}" "${MOC_OPENGL_VIEWER_INC}" "${MOC_MAIN_WINDOW_INC}" "${MOC_HELP_WINDOW_INC}" "${UI_MAIN_WINDOW_INC}" "${UI_HELP_WINDOW_INC}")
-list(APPEND SRCS "${MOC_IN_HAND_SCANNER_SRC}" "${MOC_OPENGL_VIEWER_SRC}" "${MOC_MAIN_WINDOW_SRC}" "${MOC_HELP_WINDOW_SRC}")
-
set(EXE_NAME "pcl_${SUBSUBSYS_NAME}")
-PCL_ADD_EXECUTABLE(${EXE_NAME} COMPONENT ${SUBSUBSYS_NAME} SOURCES ${SRCS} ${INCS} ${IMPL_INCS} BUNDLE)
+
+PCL_ADD_EXECUTABLE(
+ ${EXE_NAME}
+ COMPONENT
+ ${SUBSUBSYS_NAME}
+ SOURCES
+ ${SRCS}
+ ${INCS}
+ ${IMPL_INCS}
+ ${UI}
+ BUNDLE)
target_link_libraries("${EXE_NAME}" ${SUBSUBSYS_LIBS} ${OPENGL_LIBRARIES} Qt5::Concurrent Qt5::Widgets Qt5::OpenGL)
pcl_add_includes("${SUBSUBSYS_NAME}" "${SUBSUBSYS_NAME}" ${INCS})
PCL_MAKE_PKGCONFIG(${EXE_NAME} COMPONENT ${SUBSUBSYS_NAME} DESC ${SUBSUBSYS_DESC})
-# Offline integration
-list(APPEND OI_INCS "${MOC_OPENGL_VIEWER_INC}" "${MOC_OFFLINE_INTEGRATION_INC}")
-list(APPEND OI_SRCS "${MOC_OPENGL_VIEWER_SRC}" "${MOC_OFFLINE_INTEGRATION_SRC}")
+PCL_ADD_EXECUTABLE(
+ pcl_offline_integration
+ COMPONENT
+ ${SUBSUBSYS_NAME}
+ SOURCES
+ ${OI_SRCS}
+ ${OI_INCS}
+ BUNDLE)
-PCL_ADD_EXECUTABLE(pcl_offline_integration COMPONENT ${SUBSUBSYS_NAME} SOURCES ${OI_SRCS} ${OI_INCS} BUNDLE)
target_link_libraries(pcl_offline_integration ${SUBSUBSYS_LIBS} ${OPENGL_LIBRARIES} Qt5::Concurrent Qt5::Widgets Qt5::OpenGL)
# Add to the compound apps target
#ifdef __GNUC__
# pragma GCC system_header
#endif
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#include <boost/math/special_functions/fpclassify.hpp>
#include <boost/signals2/connection.hpp>
#ifdef __GNUC__
# pragma GCC system_header
#endif
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/Cholesky>
#pragma once
#include <pcl/pcl_exports.h>
-#include <pcl/apps/in_hand_scanner/boost.h>
-#include <pcl/apps/in_hand_scanner/eigen.h>
#include <pcl/apps/in_hand_scanner/common_types.h>
#include <pcl/kdtree/kdtree.h>
this->directions = other.directions;
}
+ inline PointIHS& operator=(const PointIHS& other) = default;
+
inline PointIHS (const pcl::PointXYZRGBNormal& other, const float weight)
{
this->x = other.x;
#include <pcl/memory.h>
#include <pcl/pcl_exports.h>
#include <pcl/pcl_macros.h>
-#include <pcl/apps/in_hand_scanner/boost.h>
#include <pcl/apps/in_hand_scanner/common_types.h>
#include <pcl/apps/in_hand_scanner/opengl_viewer.h>
-
+#include <boost/signals2/connection.hpp> // for connection
#include <mutex>
#include <string>
#include <sstream>
#pragma once
#include <pcl/pcl_exports.h>
-#include <pcl/apps/in_hand_scanner/eigen.h>
#include <pcl/apps/in_hand_scanner/common_types.h>
#include <pcl/apps/in_hand_scanner/utils.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/common/time.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/apps/in_hand_scanner/common_types.h>
-#include <pcl/apps/in_hand_scanner/boost.h>
-#include <pcl/apps/in_hand_scanner/eigen.h>
#include <pcl/apps/in_hand_scanner/opengl_viewer.h>
#include <mutex>
#include <pcl/pcl_exports.h>
#include <pcl/pcl_macros.h>
#include <pcl/common/time.h>
-#include <pcl/apps/in_hand_scanner/boost.h>
#include <pcl/apps/in_hand_scanner/common_types.h>
-#include <pcl/apps/in_hand_scanner/eigen.h>
#include <QGLWidget>
#include <pcl/memory.h>
#include <pcl/pcl_exports.h>
#include <pcl/pcl_macros.h>
-#include <pcl/apps/in_hand_scanner/eigen.h>
namespace pcl
{
kd_tree_->setInputCloud (cloud_model_selected);
t_build = sw.getTime ();
- std::vector <int> index (1);
+ pcl::Indices index (1);
std::vector <float> squared_distance (1);
// Clouds with one to one correspondences
#include <pcl/common/point_tests.h>
#include <pcl/features/integral_image_normal.h>
-#include <pcl/apps/in_hand_scanner/boost.h>
////////////////////////////////////////////////////////////////////////////////
#include <limits>
#include <pcl/kdtree/kdtree_flann.h>
-#include <pcl/apps/in_hand_scanner/boost.h>
#include <pcl/apps/in_hand_scanner/visibility_confidence.h>
#include <pcl/apps/in_hand_scanner/utils.h>
xyz_model->push_back (PointXYZ (pt.x, pt.y, pt.z));
}
kd_tree_->setInputCloud (xyz_model);
- std::vector <int> index (1);
+ pcl::Indices index (1);
std::vector <float> squared_distance (1);
mesh_model->reserveVertices (mesh_model->sizeVertices () + cloud_data->size ());
*/
#include <pcl/apps/in_hand_scanner/visibility_confidence.h>
+#include <Eigen/Geometry> // for Isometry3f
pcl::ihs::Dome::Dome ()
{
#pragma once
-#include <pcl/common/common.h>
#include <pcl/features/normal_3d.h>
#include <pcl/filters/passthrough.h>
#include <pcl/filters/project_inliers.h>
#include <pcl/filters/voxel_grid.h>
-#include <pcl/sample_consensus/method_types.h>
-#include <pcl/sample_consensus/model_types.h>
#include <pcl/search/kdtree.h> // for KdTree
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/segmentation/extract_polygonal_prism_data.h>
pcl::visualization::PCLVisualizer& vis)
{
for (std::size_t i = 0; i < heads.size(); i++) {
- std::stringstream name;
- name << "sphere" << i;
+ std::string name = "sphere" + std::to_string(i);
pcl::PointXYZ center_point;
center_point.x = heads[i][0];
center_point.y = heads[i][1];
center_point.z = heads[i][2];
- vis.addSphere(center_point, 0.02, 0, 255, 0, name.str());
+ vis.addSphere(center_point, 0.02, 0, 255, 0, name);
pcl::ModelCoefficients cylinder_coeff;
cylinder_coeff.values.resize(7); // We need 7 values
cylinder_coeff.values[5] = vec[2];
cylinder_coeff.values[6] = 0.01f;
- name << "cylinder";
- vis.addCylinder(cylinder_coeff, name.str());
+ name += "cylinder";
+ vis.addCylinder(cylinder_coeff, name);
}
}
*
*/
-#include <pcl/common/time.h>
-#include <pcl/features/integral_image_normal.h>
#include <pcl/filters/extract_indices.h> // for ExtractIndices
-#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/memory.h> // for pcl::make_shared
+#include <pcl/memory.h> // for pcl::make_shared
template <typename PointType>
void
pass_.filter(*cloud_filtered_);
if (int(cloud_filtered_->size()) < k_) {
- PCL_WARN("[DominantPlaneSegmentation] Filtering returned %lu points! Aborting.",
+ PCL_WARN("[DominantPlaneSegmentation] Filtering returned %lu points! Aborting.\n",
cloud_filtered_->size());
return;
}
seg_.segment(*table_inliers_, *table_coefficients_);
if (table_inliers_->indices.empty()) {
- PCL_WARN("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.");
+ PCL_WARN("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.\n");
return;
}
seg_.segment(*table_inliers_, *table_coefficients_);
if (table_inliers_->indices.empty()) {
- PCL_WARN("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.");
+ PCL_WARN("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.\n");
return;
}
binary_cloud->points.resize(input_->size());
binary_cloud->is_dense = input_->is_dense;
- for (const int& idx : cloud_object_indices.indices) {
+ for (const auto& idx : cloud_object_indices.indices) {
(*binary_cloud)[idx].getVector4fMap() = (*input_)[idx].getVector4fMap();
(*binary_cloud)[idx].intensity = 1.0;
}
pass_.filter(*cloud_filtered_);
if (int(cloud_filtered_->size()) < k_) {
- PCL_WARN("[DominantPlaneSegmentation] Filtering returned %lu points! Aborting.",
+ PCL_WARN("[DominantPlaneSegmentation] Filtering returned %lu points! Aborting.\n",
cloud_filtered_->size());
return;
}
seg_.segment(*table_inliers_, *table_coefficients_);
if (table_inliers_->indices.empty()) {
- PCL_WARN("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.");
+ PCL_WARN("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.\n");
return;
}
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <pcl/common/angles.h>
#include <pcl/common/common.h>
#include <pcl/common/time.h>
-#include <pcl/common/transforms.h>
-#include <pcl/console/parse.h>
-#include <pcl/console/print.h>
-#include <pcl/io/pcd_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/registration/transformation_estimation_svd.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/point_cloud_handlers.h>
#include <QMainWindow>
#include <QMutex>
#include <QTimer>
-#include <ui_manual_registration.h>
using PointT = pcl::PointXYZRGBA;
DstPointPickCallback(const pcl::visualization::PointPickingEvent& event, void*);
protected:
+ void
+ refreshView();
+
pcl::visualization::PCLVisualizer::Ptr vis_src_;
pcl::visualization::PCLVisualizer::Ptr vis_dst_;
#include <pcl/io/pcd_io.h>
#include <pcl/kdtree/kdtree_flann.h>
+#include <cfloat> // for FLT_MAX
+
namespace pcl {
/**
ResultPtr
classify(const PointT& p_q, double radius, float gaussian_param, int max_nn = INT_MAX)
{
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
std::vector<float> k_sqr_distances;
getSimilarExemplars(p_q, radius, k_indices, k_sqr_distances, max_nn);
return getGaussianBestScores(gaussian_param, k_indices, k_sqr_distances);
int
getKNearestExemplars(const PointT& p_q,
int k,
- std::vector<int>& k_indices,
+ pcl::Indices& k_indices,
std::vector<float>& k_sqr_distances)
{
k_indices.resize(k);
int
getSimilarExemplars(const PointT& p_q,
double radius,
- std::vector<int>& k_indices,
+ pcl::Indices& k_indices,
std::vector<float>& k_sqr_distances,
int max_nn = INT_MAX)
{
* \return a square distance to each training class
*/
std::shared_ptr<std::vector<float>>
- getSmallestSquaredDistances(std::vector<int>& k_indices,
+ getSmallestSquaredDistances(pcl::Indices& k_indices,
std::vector<float>& k_sqr_distances)
{
// Reserve space for distances
auto sqr_distances = std::make_shared<std::vector<float>>(classes_.size(), FLT_MAX);
// Select square distance to each class
- for (std::vector<int>::const_iterator i = k_indices.begin(); i != k_indices.end();
- ++i)
- if ((*sqr_distances)[labels_idx_[*i]] > k_sqr_distances[i - k_indices.begin()])
- (*sqr_distances)[labels_idx_[*i]] = k_sqr_distances[i - k_indices.begin()];
+ for (auto i = k_indices.cbegin(); i != k_indices.cend(); ++i)
+ if ((*sqr_distances)[labels_idx_[*i]] > k_sqr_distances[i - k_indices.cbegin()])
+ (*sqr_distances)[labels_idx_[*i]] = k_sqr_distances[i - k_indices.cbegin()];
return sqr_distances;
}
* \return pair of label and score for each training class from the neighborhood
*/
ResultPtr
- getLinearBestScores(std::vector<int>& k_indices, std::vector<float>& k_sqr_distances)
+ getLinearBestScores(pcl::Indices& k_indices, std::vector<float>& k_sqr_distances)
{
// Get smallest squared distances and transform them to a score for each class
auto sqr_distances = getSmallestSquaredDistances(k_indices, k_sqr_distances);
*/
ResultPtr
getGaussianBestScores(float gaussian_param,
- std::vector<int>& k_indices,
+ pcl::Indices& k_indices,
std::vector<float>& k_sqr_distances)
{
// Get smallest squared distances and transform them to a score for each class
cloud_cb(const CloudConstPtr& cloud);
protected:
+ void
+ refreshView();
+
pcl::visualization::PCLVisualizer::Ptr vis_;
pcl::OpenNIGrabber& grabber_;
std::string device_id_;
#include <pcl/apps/organized_segmentation_demo_qt.h>
#include <pcl/common/time.h>
-#include <pcl/common/transforms.h>
#include <pcl/features/integral_image_normal.h>
-#include <pcl/filters/voxel_grid.h>
-#include <pcl/io/oni_grabber.h>
-#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_grabber.h>
-#include <pcl/io/pcd_io.h>
+#include <pcl/io/grabber.h> // for Grabber
#include <pcl/segmentation/edge_aware_plane_comparator.h>
#include <pcl/segmentation/euclidean_cluster_comparator.h>
#include <pcl/segmentation/euclidean_plane_coefficient_comparator.h>
private Q_SLOTS:
void
timeoutSlot();
+
+private:
+ void
+ refreshView();
};
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <pcl/common/angles.h>
#include <pcl/common/common.h>
#include <pcl/common/time.h>
-#include <pcl/common/transforms.h>
-#include <pcl/console/parse.h>
-#include <pcl/console/print.h>
-#include <pcl/io/pcd_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/registration/transformation_estimation_svd.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/point_cloud_handlers.h>
#include <QMainWindow>
#include <QMutex>
#include <QTimer>
-#include <ui_pcd_video_player.h>
#include <ctime>
#include <iostream>
~PCDVideoPlayer() {}
protected:
+ void
+ refreshView();
+
pcl::visualization::PCLVisualizer::Ptr vis_;
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_;
#pragma once
-#include <pcl/common/common.h>
+#include <pcl/point_cloud.h> // for PointCloud
+#include <pcl/point_types.h> // for PointXYZ
#include <vtkPolyData.h>
#include <vtkSmartPointer.h>
set(SUBSUBSYS_NAME modeler)
set(SUBSUBSYS_DESC "PCLModeler: PCL based reconstruction platform")
set(SUBSUBSYS_DEPS common geometry io filters sample_consensus segmentation visualization kdtree features surface octree registration keypoints tracking search apps)
+set(SUBSUBSYS_EXT_DEPS vtk Qt5)
set(REASON "")
-# Find VTK
-if(NOT VTK_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "VTK was not found.")
-else()
- set(DEFAULT TRUE)
- set(REASON)
- set(VTK_USE_FILE "${VTK_USE_FILE}" CACHE INTERNAL "VTK_USE_FILE")
- include("${VTK_USE_FILE}")
-endif()
-
-# QT5 Found?
-if(NOT Qt5_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "Qt5 was not found.")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
-endif()
-
# QVTK?
-if(NOT QVTK_FOUND)
+if(NOT HAVE_QVTK)
set(DEFAULT AUTO_OFF)
set(REASON "VTK was not built with Qt support.")
elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
set(DEFAULT FALSE)
endif()
-PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
-PCL_SUBSUBSYS_DEPEND(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSYS_DEPS} EXT_DEPS vtk)
+PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSUBSYS_DEPEND(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSUBSYS_DEPS} EXT_DEPS ${SUBSUBSYS_EXT_DEPS})
PCL_ADD_DOC("${SUBSUBSYS_NAME}")
main_window.ui
)
-set(moc_incs
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/main_window.h"
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/scene_tree.h"
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/parameter_dialog.h"
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/thread_controller.h"
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/abstract_worker.h"
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/cloud_mesh_item_updater.h"
-)
-
set(resources
resources/resources.qrc
)
set(incs
- ${moc_incs}
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/main_window.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/scene_tree.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/parameter_dialog.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/thread_controller.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/abstract_worker.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/cloud_mesh_item_updater.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/dock_widget.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/abstract_item.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/impl/scene_tree.hpp"
)
-# Qt stuff
-QT5_WRAP_UI(ui_srcs ${uis})
-QT5_WRAP_CPP(moc_srcs ${moc_incs} OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED)
-QT5_ADD_RESOURCES(resource_srcs ${resources})
-
-# Organize files
-source_group("Resources" FILES ${uis} ${resources} ${EXE_ICON})
-source_group("Generated" FILES ${ui_srcs} ${moc_srcs} ${resource_srcs} ${RCS_SOURCES})
-set_source_files_properties(${srcs} PROPERTIES OBJECT_DEPENDS "${ui_srcs}")
+list(APPEND CMAKE_AUTOUIC_SEARCH_PATHS "${CMAKE_CURRENT_SOURCE_DIR}")
# Generate executable
set(EXE_NAME "pcl_${SUBSUBSYS_NAME}")
-PCL_ADD_EXECUTABLE(${EXE_NAME} COMPONENT ${SUBSUBSYS_NAME} SOURCES ${ui_srcs} ${moc_srcs} ${resource_srcs} ${srcs} ${incs} ${impl_incs})
-target_link_libraries("${EXE_NAME}" pcl_common pcl_io pcl_kdtree pcl_filters pcl_visualization pcl_segmentation pcl_surface pcl_features pcl_sample_consensus pcl_search Qt5::Widgets)
+PCL_ADD_EXECUTABLE(
+ ${EXE_NAME}
+ COMPONENT
+ ${SUBSUBSYS_NAME}
+ SOURCES
+ ${uis}
+ ${resources}
+ ${srcs}
+ ${incs}
+ ${impl_incs})
-# Put the ui in the windows project file
-if(("${CMAKE_BUILD_TOOL}" MATCHES "msdev") OR("${CMAKE_BUILD_TOOL}" MATCHES "devenv"))
- list(APPEND srcs ${uis})
+target_link_libraries("${EXE_NAME}" pcl_common pcl_io pcl_kdtree pcl_filters pcl_visualization pcl_segmentation pcl_surface pcl_features pcl_sample_consensus pcl_search Qt5::Widgets)
+#TODO: Update when CMake 3.10 is the minimum required version
+if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries("${EXE_NAME}" VTK::GUISupportQt)
endif()
# Install include files
*/
#pragma once
-
-#include <QVTKWidget.h>
+#include <pcl/visualization/qvtk_compatibility.h>
#include <vtkSmartPointer.h>
class RenderWindowItem;
-class RenderWindow : public QVTKWidget {
+class RenderWindow : public PCLQVTKWidget {
public:
RenderWindow(RenderWindowItem* render_window_item,
QWidget* parent = nullptr,
CloudMesh cloud_mesh;
for (const auto& mesh : cloud_meshes) {
if (filename.rfind(".obj") == (filename.length() - 4)) {
- std::size_t delta = cloud_mesh.cloud_->size();
+ index_t delta = cloud_mesh.cloud_->size();
for (auto polygon : mesh->polygons_) {
- for (unsigned int& vertice : polygon.vertices)
- vertice += static_cast<unsigned int>(delta);
+ for (index_t& vertice : polygon.vertices)
+ vertice += delta;
cloud_mesh.polygons_.push_back(polygon);
}
}
}
// Need to check for NaNs, Infs, etc.
else {
- pcl::IndicesPtr indices(new std::vector<int>());
+ pcl::IndicesPtr indices(new pcl::Indices());
pcl::removeNaNFromPointCloud(*cloud_, *indices);
data->SetNumberOfValues(3 * indices->size());
if (cloud_->is_dense) {
for (const auto& polygon : polygons_) {
vtk_polygons_->InsertNextCell(polygon.vertices.size());
- for (const unsigned int& vertex : polygon.vertices)
+ for (const auto& vertex : polygon.vertices)
vtk_polygons_->InsertCellPoint(vertex);
}
}
else {
- pcl::IndicesPtr indices(new std::vector<int>());
+ pcl::IndicesPtr indices(new pcl::Indices());
pcl::removeNaNFromPointCloud(*cloud_, *indices);
for (const auto& polygon : polygons_) {
vtk_polygons_->InsertNextCell(polygon.vertices.size());
- for (const unsigned int& vertex : polygon.vertices)
+ for (const auto& vertex : polygon.vertices)
vtk_polygons_->InsertCellPoint((*indices)[vertex]);
}
}
pcl::modeler::CloudMeshItem::createChannels()
{
RenderWindowItem* render_window_item = dynamic_cast<RenderWindowItem*>(parent());
- addChild(new PointsActorItem(
- this, cloud_mesh_, render_window_item->getRenderWindow()->GetRenderWindow()));
- addChild(new NormalsActorItem(
- this, cloud_mesh_, render_window_item->getRenderWindow()->GetRenderWindow()));
- addChild(new SurfaceActorItem(
- this, cloud_mesh_, render_window_item->getRenderWindow()->GetRenderWindow()));
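+ // Fetch the underlying vtkRenderWindow through the VTK-version-agnostic helper and reuse it for all three actor items.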
+ vtkRenderWindow* win =
+ getRenderWindowCompat(*(render_window_item->getRenderWindow()));
+
+ addChild(new PointsActorItem(this, cloud_mesh_, win));
+ addChild(new NormalsActorItem(this, cloud_mesh_, win));
+ addChild(new SurfaceActorItem(this, cloud_mesh_, win));
+
for (int i = 0, i_end = childCount(); i < i_end; ++i) {
ChannelActorItem* child_item = dynamic_cast<ChannelActorItem*>(child(i));
child_item->init();
for (int i = 0, i_end = childCount(); i < i_end; ++i) {
ChannelActorItem* child_item = dynamic_cast<ChannelActorItem*>(child(i));
child_item->switchRenderWindow(
- render_window_item->getRenderWindow()->GetRenderWindow());
+ getRenderWindowCompat(*render_window_item->getRenderWindow()));
}
render_window_item->getRenderWindow()->updateAxes();
pcl::NormalEstimation<pcl::PointSurfel, pcl::PointNormal> normal_estimator;
normal_estimator.setInputCloud(cloud);
- pcl::IndicesPtr indices(new std::vector<int>());
+ pcl::IndicesPtr indices(new pcl::Indices());
pcl::removeNaNFromPointCloud(*cloud, *indices);
normal_estimator.setIndices(indices);
}
}
else {
- pcl::IndicesPtr indices(new std::vector<int>());
+ pcl::IndicesPtr indices(new pcl::Indices());
pcl::removeNaNFromPointCloud(*cloud, *indices);
vtkIdType nr_normals = static_cast<vtkIdType>((indices->size() - 1) / level_ + 1);
pcl::modeler::RenderWindow::RenderWindow(RenderWindowItem* render_window_item,
QWidget* parent,
Qt::WindowFlags flags)
-: QVTKWidget(parent, flags)
+: PCLQVTKWidget(parent, flags)
, axes_(vtkSmartPointer<vtkCubeAxesActor>::New())
, render_window_item_(render_window_item)
{
void
pcl::modeler::RenderWindow::initRenderer()
{
- vtkSmartPointer<vtkRenderWindow> win = GetRenderWindow();
+ vtkSmartPointer<vtkRenderWindow> win = getRenderWindowCompat(*this);
vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New();
win->AddRenderer(renderer);
dynamic_cast<SceneTree*>(render_window_item_->treeWidget())
->selectRenderWindowItem(render_window_item_);
- QVTKWidget::focusInEvent(event);
+ PCLQVTKWidget::focusInEvent(event);
}
//////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::modeler::RenderWindow::render()
{
- GetRenderWindow()->Render();
+ getRenderWindowCompat(*this)->Render();
}
//////////////////////////////////////////////////////////////////////////////////////////////
pcl::modeler::RenderWindow::resetCamera()
{
double bounds[6];
- GetRenderWindow()->GetRenderers()->GetFirstRenderer()->ComputeVisiblePropBounds(
- bounds);
- GetRenderWindow()->GetRenderers()->GetFirstRenderer()->ResetCamera(bounds);
+ getRenderWindowCompat(*this)
+ ->GetRenderers()
+ ->GetFirstRenderer()
+ ->ComputeVisiblePropBounds(bounds);
+ getRenderWindowCompat(*this)->GetRenderers()->GetFirstRenderer()->ResetCamera(bounds);
render();
}
void
pcl::modeler::RenderWindow::getBackground(double& r, double& g, double& b)
{
- GetRenderWindow()->GetRenderers()->GetFirstRenderer()->GetBackground(r, g, b);
+ getRenderWindowCompat(*this)->GetRenderers()->GetFirstRenderer()->GetBackground(
+ r, g, b);
}
//////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::modeler::RenderWindow::setBackground(double r, double g, double b)
{
- GetRenderWindow()->GetRenderers()->GetFirstRenderer()->SetBackground(r, g, b);
+ getRenderWindowCompat(*this)->GetRenderers()->GetFirstRenderer()->SetBackground(
+ r, g, b);
}
//////////////////////////////////////////////////////////////////////////////////////////////
vtkBoundingBox bb;
vtkActorCollection* actors =
- GetRenderWindow()->GetRenderers()->GetFirstRenderer()->GetActors();
+ getRenderWindowCompat(*this)->GetRenderers()->GetFirstRenderer()->GetActors();
actors->InitTraversal();
for (int i = 0, i_end = actors->GetNumberOfItems(); i < i_end; ++i) {
double bounds[6];
bb.GetBounds(bounds);
axes_->SetBounds(bounds);
- axes_->SetCamera(
- GetRenderWindow()->GetRenderers()->GetFirstRenderer()->GetActiveCamera());
+ axes_->SetCamera(getRenderWindowCompat(*this)
+ ->GetRenderers()
+ ->GetFirstRenderer()
+ ->GetActiveCamera());
}
//////////////////////////////////////////////////////////////////////////////////////////////
pcl::modeler::RenderWindow::setShowAxes(bool flag)
{
if (flag)
- GetRenderWindow()->GetRenderers()->GetFirstRenderer()->AddActor(axes_);
+ getRenderWindowCompat(*this)->GetRenderers()->GetFirstRenderer()->AddActor(axes_);
else
- GetRenderWindow()->GetRenderers()->GetFirstRenderer()->RemoveActor(axes_);
+ getRenderWindowCompat(*this)->GetRenderers()->GetFirstRenderer()->RemoveActor(
+ axes_);
}
set(SUBSUBSYS_NAME point_cloud_editor)
set(SUBSUBSYS_DESC "Point Cloud Editor - Simple editor for 3D point clouds")
set(SUBSUBSYS_DEPS common filters io apps)
-
-# QT5 Found?
-if(NOT Qt5_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "Qt5 was not found.")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
-endif()
-
-# Find OpenGL
-if(NOT OPENGL_FOUND)
- set(DEFAULT AUTO_OFF)
- set(REASON "OpenGL was not found.")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
-endif()
-
+set(SUBSUBSYS_EXT_DEPS vtk Qt5 OpenGL)
# Default to not building for now
if(${DEFAULT} STREQUAL "TRUE")
endif()
PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
-PCL_SUBSUBSYS_DEPEND(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" ${SUBSYS_DEPS})
+PCL_SUBSUBSYS_DEPEND(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" DEPS ${SUBSUBSYS_DEPS} EXT_DEPS ${SUBSUBSYS_EXT_DEPS})
PCL_ADD_DOC(${SUBSUBSYS_NAME})
if(NOT build)
return()
endif()
-set(MOC_INCS
- "include/pcl/apps/${SUBSUBSYS_NAME}/cloudEditorWidget.h"
- "include/pcl/apps/${SUBSUBSYS_NAME}/mainWindow.h"
- "include/pcl/apps/${SUBSUBSYS_NAME}/denoiseParameterForm.h"
- "include/pcl/apps/${SUBSUBSYS_NAME}/statisticsDialog.h"
-)
-
set(RSRC
resources/pceditor_resources.qrc
)
-set(INCS ${MOC_INCS}
+set(INCS
+ "include/pcl/apps/${SUBSUBSYS_NAME}/cloudEditorWidget.h"
+ "include/pcl/apps/${SUBSUBSYS_NAME}/mainWindow.h"
+ "include/pcl/apps/${SUBSUBSYS_NAME}/denoiseParameterForm.h"
+ "include/pcl/apps/${SUBSUBSYS_NAME}/statisticsDialog.h"
"include/pcl/apps/${SUBSUBSYS_NAME}/cloud.h"
"include/pcl/apps/${SUBSUBSYS_NAME}/cloudTransformTool.h"
"include/pcl/apps/${SUBSUBSYS_NAME}/command.h"
src/denoiseParameterForm.cpp
)
-qt5_wrap_cpp(MOC_SRCS ${MOC_INCS} OPTIONS -DBOOST_TT_HAS_OPERATOR_HPP_INCLUDED)
-qt5_add_resources(RESOURCES_SRCS ${RSRC})
-
include_directories(
"${CMAKE_CURRENT_BINARY_DIR}"
"${CMAKE_CURRENT_SOURCE_DIR}/include"
)
set(EXE_NAME "pcl_${SUBSUBSYS_NAME}")
-PCL_ADD_EXECUTABLE(${EXE_NAME} COMPONENT ${SUBSUBSYS_NAME} SOURCES ${SRCS} ${RESOURCES_SRCS} ${MOC_SRCS} ${INCS})
+PCL_ADD_EXECUTABLE(
+ ${EXE_NAME}
+ COMPONENT
+ ${SUBSUBSYS_NAME}
+ SOURCES
+ ${SRCS}
+ ${RSRC}
+ ${INCS})
target_link_libraries("${EXE_NAME}" Qt5::Widgets Qt5::OpenGL ${OPENGL_LIBRARIES} ${BOOST_LIBRARIES} pcl_common pcl_io pcl_filters)
/// the origin.
// XXX - handle shifting upon setting of a Cloud3D
// XXX - add functions for retrieving an unshifted Cloud3D
-// XXX - add functions for retrieveing unshifted points by index
+// XXX - add functions for retrieving unshifted points by index
// XXX - mark access functions below as returning shifted values
class Cloud : public Statistics
{
void
drawWithHighlightColor () const;
- /// @brief Sets the axis along which the displyed points should have the
+ /// @brief Sets the axis along which the displayed points should have the
/// color ramp applied.
/// @param a The axis id describing which direction the ramp should be
/// applied.
static const float DEFAULT_POINT_DISPLAY_SIZE_;
/// Default Highlight Point Size
static const float DEFAULT_POINT_HIGHLIGHT_SIZE_;
- /// Default Point Color - Red componenet
+ /// Default Point Color - Red component
static const float DEFAULT_POINT_DISPLAY_COLOR_RED_;
- /// Default Point Color - Green componenet
+ /// Default Point Color - Green component
static const float DEFAULT_POINT_DISPLAY_COLOR_GREEN_;
- /// Default Point Color - Blue componenet
+ /// Default Point Color - Blue component
static const float DEFAULT_POINT_DISPLAY_COLOR_BLUE_;
- /// Default Point Highlight Color - Red componenet
+ /// Default Point Highlight Color - Red component
static const float DEFAULT_POINT_HIGHLIGHT_COLOR_RED_;
- /// Default Point Highlight Color - Green componenet
+ /// Default Point Highlight Color - Green component
static const float DEFAULT_POINT_HIGHLIGHT_COLOR_GREEN_;
- /// Default Point Highlight Color - Blue componenet
+ /// Default Point Highlight Color - Blue component
static const float DEFAULT_POINT_HIGHLIGHT_COLOR_BLUE_;
private:
#pragma once
-#include <sstream>
+#include <string>
/// @brief Sets an array representing a 4x4 matrix to the identity
/// @param matrix A pointer to memory representing at least MATRIX_SIZE
bool
invertMatrix(const float* matrix, float* inverse);
-/// @brief Helper function for converting objects to strings (using operator<<)
-/// @param input The object to be converted
-/// @param result A reference to the string where the resulting string will be
-/// stored.
-template<typename T>
-void
-toString(T input, std::string &result)
-{
- std::stringstream ss;
- ss << input;
- result = ss.str();
-}
-
/// @brief Converts the passed string to lowercase in place
/// @param s The string to be made lower.
void
registerStats();
}
+ /// @brief Copy constructor.
+ /// @param selection a const reference to a selection object whose
+ /// properties will be copied.
+ Selection (const Selection& selection) = default;
+
/// @brief Equal operator
/// @param selection a const reference to a selection object whose
/// properties will be copied.
operator= (const TransformCommand&) = delete;
protected:
- // Transforms the coorindates of the selected points according to the transform
+ // Transforms the coordinates of the selected points according to the transform
// matrix.
void
execute () override;
/// pointers to constructor params
ConstSelectionPtr selection_ptr_;
- /// a pointer poiting to the cloud
+ /// a pointer pointing to the cloud
CloudPtr cloud_ptr_;
float translate_x_, translate_y_, translate_z_;
std::string
Cloud::getStat () const
{
- std::string title = "Total number of points: ";
- std::string num_str;
- ::toString(cloud_.size(), num_str);
- return (title + num_str);
+ const std::string title = "Total number of points: ";
+ const std::string num_str = std::to_string(cloud_.size());
+ return title + num_str;
}
void
/// @details the implementation of class CloudEditorWidget.
/// @author Yue Li and Matthew Hielsberg
-#include <cctype>
#include <QFileDialog>
#include <QMessageBox>
#include <QMouseEvent>
if (pcl::io::loadPCDFile<Point3D>(filename, tmp) == -1)
throw;
pcl_cloud_ptr = PclCloudPtr(new Cloud3D(tmp));
- std::vector<int> index;
+ pcl::Indices index;
pcl::removeNaNFromPointCloud(*pcl_cloud_ptr, *pcl_cloud_ptr, index);
Statistics::clear();
cloud_ptr_ = CloudPtr(new Cloud(*pcl_cloud_ptr, true));
/// @details the implementation of class CloudTransformTool
/// @author Yue Li and Matthew Hielsberg
-#include <algorithm>
-#include <cmath>
#include <pcl/apps/point_cloud_editor/common.h>
#include <pcl/apps/point_cloud_editor/cloudTransformTool.h>
#include <pcl/apps/point_cloud_editor/cloud.h>
CopyBuffer::getStat () const
{
if (buffer_.size() == 0)
- return ("");
- std::string title = "The number of points copied to the clipboard: ";
- std::string num_str;
- ::toString(buffer_.size(), num_str);
- return (title + num_str);
+ return "";
+ const std::string title = "The number of points copied to the clipboard: ";
+ const std::string num_str = std::to_string(buffer_.size());
+ return title + num_str;
}
/// @details the implementation of the class DenoiseCommand
/// @author Yue Li and Matthew Hielsberg
-#include <pcl/PointIndices.h>
#include <pcl/point_types.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/apps/point_cloud_editor/denoiseCommand.h>
filter.filter(temp_cloud);
// back up the removed indices.
pcl::IndicesConstPtr indices_ptr = filter.getRemovedIndices();
- for(const int &it : *indices_ptr)
+ for(const auto &it : *indices_ptr)
removed_indices_.addIndex(static_cast<unsigned int>(it));
// back up the removed points.
removed_points_.set(cloud_ptr_, removed_indices_);
/// @details the implementation of the MainWindow class
/// @author Yue Li and Matthew Hielsberg
-#include <algorithm>
#include <pcl/apps/point_cloud_editor/mainWindow.h>
#include <pcl/apps/point_cloud_editor/cloudEditorWidget.h>
#include <pcl/apps/point_cloud_editor/localTypes.h>
Selection::getStat () const
{
if (selected_indices_.empty ())
- return ("");
- std::string title = "Total number of selected points: ";
- std::string num_str;
- ::toString(selected_indices_.size(), num_str);
- return (title + num_str);
+ return "";
+ const std::string title = "Total number of selected points: ";
+ const std::string num_str = std::to_string(selected_indices_.size());
+ return title + num_str;
}
/// @details the implementation of class CloudTransformTool
/// @author Yue Li and Matthew Hielsberg
-#include <cmath>
#include <pcl/apps/point_cloud_editor/selectionTransformTool.h>
#include <pcl/apps/point_cloud_editor/cloud.h>
#include <pcl/apps/point_cloud_editor/selection.h>
/// class has been based on
/// @author Matthew Hielsberg
-#include <algorithm>
#include <limits>
#include <pcl/apps/point_cloud_editor/common.h>
#include <pcl/apps/point_cloud_editor/trackball.h>
float rgb_m;
bool exists_m;
- pcl::for_each_type<FieldListM>(pcl::CopyIfFieldExists<PointInT, float>(
- (*scene_vis)[0], "rgb", exists_m, rgb_m));
+ pcl::for_each_type<FieldListM>(
+ pcl::CopyIfFieldExists<PointInT, float>((*scene_vis)[0], "rgb", exists_m, rgb_m));
std::cout << "Color exists:" << static_cast<int>(exists_m) << std::endl;
if (exists_m) {
#include <pcl/apps/face_detection/openni_frame_source.h>
#include <pcl/common/time.h>
#include <pcl/console/parse.h>
-#include <pcl/features/integral_image_normal.h>
#include <pcl/recognition/face_detection/rf_face_detector_trainer.h>
// clang-format off
#include <pcl/apps/face_detection/face_detection_apps_utils.h>
#include <pcl/apps/face_detection/openni_frame_source.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/memory.h>
namespace OpenNIFrameSource {
#include <pcl/features/pfh.h>
#include <pcl/features/pfhrgb.h>
#include <pcl/features/shot_omp.h>
+#include <pcl/filters/extract_indices.h> // for ExtractIndices
#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/harris_3d.h>
#include <pcl/keypoints/sift_keypoint.h>
#include <pcl/registration/correspondence_rejection_sample_consensus.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/transformation_estimation_svd.h>
-#include <pcl/registration/transforms.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/search/kdtree.h>
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/surface/gp3.h>
-#include <pcl/surface/grid_projection.h>
#include <pcl/surface/marching_cubes_hoppe.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/ModelCoefficients.h>
extract.setNegative(true);
extract.filter(*segmented);
- std::vector<int> indices;
+ pcl::Indices indices;
pcl::removeNaNFromPointCloud(*segmented, *segmented, indices);
std::cout << "OK" << std::endl;
if (cluster_indices.size() > 1)
std::cout << " Using largest one...";
std::cout << std::endl;
- typename pcl::IndicesPtr indices(new std::vector<int>);
+ typename pcl::IndicesPtr indices(new pcl::Indices);
*indices = cluster_indices[0].indices;
extract.setInputCloud(segmented);
extract.setIndices(indices);
// Find the index of the best match for each keypoint, and store it in
// "correspondences_out"
const int k = 1;
- std::vector<int> k_indices(k);
+ pcl::Indices k_indices(k);
std::vector<float> k_squared_distances(k);
for (int i = 0; i < static_cast<int>(source->size()); ++i) {
descriptor_kdtree.nearestKSearch(*source, i, k, k_indices, k_squared_distances);
break;
case 1:
- glDrawPixels(gmm_image_->width,
- gmm_image_->height,
- GL_RGB,
- GL_FLOAT,
- &((*gmm_image_)[0]));
+ glDrawPixels(
+ gmm_image_->width, gmm_image_->height, GL_RGB, GL_FLOAT, &((*gmm_image_)[0]));
break;
case 2:
*/
#include <pcl/apps/manual_registration.h>
+#include <pcl/io/pcd_io.h> // for loadPCDFile
#include <QApplication>
#include <QEvent>
#include <QMutexLocker>
#include <QObject>
+#include <ui_manual_registration.h>
#include <vtkCamera.h>
+#include <vtkGenericOpenGLRenderWindow.h>
#include <vtkRenderWindow.h>
#include <vtkRendererCollection.h>
this->setWindowTitle("PCL Manual Registration");
// Set up the source window
+#if VTK_MAJOR_VERSION > 8
+ auto renderer_src = vtkSmartPointer<vtkRenderer>::New();
+ auto renderWindow_src = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
+ renderWindow_src->AddRenderer(renderer_src);
+ vis_src_.reset(
+ new pcl::visualization::PCLVisualizer(renderer_src, renderWindow_src, "", false));
+#else
vis_src_.reset(new pcl::visualization::PCLVisualizer("", false));
- ui_->qvtk_widget_src->SetRenderWindow(vis_src_->getRenderWindow());
- vis_src_->setupInteractor(ui_->qvtk_widget_src->GetInteractor(),
- ui_->qvtk_widget_src->GetRenderWindow());
+#endif // VTK_MAJOR_VERSION > 8
+ setRenderWindowCompat(*(ui_->qvtk_widget_src), *(vis_src_->getRenderWindow()));
+ vis_src_->setupInteractor(getInteractorCompat(*(ui_->qvtk_widget_src)),
+ getRenderWindowCompat(*(ui_->qvtk_widget_src)));
+
vis_src_->getInteractorStyle()->setKeyboardModifier(
pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
- ui_->qvtk_widget_src->update();
vis_src_->registerPointPickingCallback(&ManualRegistration::SourcePointPickCallback,
*this);
// Set up the destination window
+#if VTK_MAJOR_VERSION > 8
+ auto renderer_dst = vtkSmartPointer<vtkRenderer>::New();
+ auto renderWindow_dst = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
+ renderWindow_dst->AddRenderer(renderer_dst);
+ vis_dst_.reset(
+ new pcl::visualization::PCLVisualizer(renderer_dst, renderWindow_dst, "", false));
+#else
vis_dst_.reset(new pcl::visualization::PCLVisualizer("", false));
- ui_->qvtk_widget_dst->SetRenderWindow(vis_dst_->getRenderWindow());
- vis_dst_->setupInteractor(ui_->qvtk_widget_dst->GetInteractor(),
- ui_->qvtk_widget_dst->GetRenderWindow());
+#endif // VTK_MAJOR_VERSION > 8
+ setRenderWindowCompat(*(ui_->qvtk_widget_dst), *(vis_dst_->getRenderWindow()));
+ vis_dst_->setupInteractor(getInteractorCompat(*(ui_->qvtk_widget_dst)),
+ getRenderWindowCompat(*(ui_->qvtk_widget_dst)));
+
vis_dst_->getInteractorStyle()->setKeyboardModifier(
pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
- ui_->qvtk_widget_dst->update();
vis_dst_->registerPointPickingCallback(&ManualRegistration::DstPointPickCallback,
*this);
+ // Render view
+ refreshView();
// Connect all buttons
connect(ui_->confirmSrcPointButton,
ManualRegistration::confirmSrcPointPressed()
{
if (src_point_selected_) {
- src_pc_.points.push_back(src_point_);
- PCL_INFO("Selected %zu source points\n",
- static_cast<std::size_t>(src_pc_.size()));
+ src_pc_.push_back(src_point_);
+ PCL_INFO("Selected %zu source points\n", static_cast<std::size_t>(src_pc_.size()));
src_point_selected_ = false;
src_pc_.width = src_pc_.size();
}
ManualRegistration::confirmDstPointPressed()
{
if (dst_point_selected_) {
- dst_pc_.points.push_back(dst_point_);
+ dst_pc_.push_back(dst_point_);
PCL_INFO("Selected %zu destination points\n",
static_cast<std::size_t>(dst_pc_.size()));
dst_point_selected_ = false;
{
dst_point_selected_ = false;
src_point_selected_ = false;
- src_pc_.points.clear();
- dst_pc_.points.clear();
+ src_pc_.clear();
+ dst_pc_.clear();
src_pc_.height = 1;
src_pc_.width = 0;
dst_pc_.height = 1;
->GetActiveCamera()
->SetParallelProjection(1);
}
- ui_->qvtk_widget_src->update();
- ui_->qvtk_widget_dst->update();
+
+ refreshView();
}
// TODO
}
cloud_dst_modified_ = false;
}
- ui_->qvtk_widget_src->update();
+ refreshView();
+}
+
+void
+ManualRegistration::refreshView()
+{
+#if VTK_MAJOR_VERSION > 8
+ ui_->qvtk_widget_src->renderWindow()->Render();
+ ui_->qvtk_widget_dst->renderWindow()->Render();
+#else
+ ui_->qvtk_widget_src->update();
 ui_->qvtk_widget_dst->update();
+#endif // VTK_MAJOR_VERSION > 8
}
void
</property>
<layout class="QGridLayout" name="gridLayout">
<item row="0" column="3">
- <widget class="QVTKWidget" name="qvtk_widget_src">
+ <widget class="PCLQVTKWidget" name="qvtk_widget_src">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>255</horstretch>
</layout>
</item>
<item row="0" column="7">
- <widget class="QVTKWidget" name="qvtk_widget_dst">
+ <widget class="PCLQVTKWidget" name="qvtk_widget_dst">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>255</horstretch>
</widget>
<customwidgets>
<customwidget>
- <class>QVTKWidget</class>
- <extends>QWidget</extends>
- <header>QVTKWidget.h</header>
+ <class>PCLQVTKWidget</class>
+ <extends>QOpenGLWidget</extends>
+ <header location="global">pcl/visualization/qvtk_compatibility.h</header>
</customwidget>
</customwidgets>
<resources/>
feature_persistence.setDistanceMetric(pcl::CS);
PointCloud<FPFHSignature33>::Ptr output_features(new PointCloud<FPFHSignature33>());
- auto output_indices = pcl::make_shared<std::vector<int>>();
+ auto output_indices = pcl::make_shared<pcl::Indices>();
feature_persistence.determinePersistentFeatures(*output_features, output_indices);
PCL_INFO("persistent features cloud size: %zu\n",
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/agast_2d.h>
#include <pcl/visualization/image_viewer.h>
#include <pcl/visualization/pcl_visualizer.h>
std::string
getStrBool(bool state)
{
- std::stringstream ss;
- ss << state;
- return ss.str();
+ return state ? "1" : "0";
}
/////////////////////////////////////////////////////////////////////////
std::size_t j = 0;
for (std::size_t i = 0; i < keypoints->size(); ++i) {
- const PointT& pt =
- (*cloud)(static_cast<long unsigned int>((*keypoints)[i].u),
- static_cast<long unsigned int>((*keypoints)[i].v));
+ const PointT& pt = (*cloud)(static_cast<long unsigned int>((*keypoints)[i].u),
+ static_cast<long unsigned int>((*keypoints)[i].v));
if (!std::isfinite(pt.x) || !std::isfinite(pt.y) || !std::isfinite(pt.z))
continue;
#define SHOW_FPS 1
#include <pcl/apps/timer.h>
-#include <pcl/common/angles.h>
#include <pcl/common/common.h>
#include <pcl/common/time.h>
-#include <pcl/console/parse.h>
-#include <pcl/console/print.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/brisk_2d.h>
#include <pcl/visualization/image_viewer.h>
#include <pcl/visualization/pcl_visualizer.h>
std::string
getStrBool(bool state)
{
- std::stringstream ss;
- ss << state;
- return ss.str();
+ return state ? "1" : "0";
}
/////////////////////////////////////////////////////////////////////////
std::size_t j = 0;
for (std::size_t i = 0; i < keypoints->size(); ++i) {
- PointT pt =
- bilinearInterpolation(cloud, (*keypoints)[i].x, (*keypoints)[i].y);
+ PointT pt = bilinearInterpolation(cloud, (*keypoints)[i].x, (*keypoints)[i].y);
keypoints3d[j].x = pt.x;
keypoints3d[j].y = pt.y;
#include <pcl/common/angles.h>
#include <pcl/common/common.h>
#include <pcl/common/time.h>
-#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/features/integral_image_normal.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/geometry/polygon_operations.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/sample_consensus/sac_model_plane.h>
#include <pcl/search/organized.h>
#include <pcl/segmentation/edge_aware_plane_comparator.h>
* \param[out] object the segmented resultant object
*/
void
- segmentObject(int picked_idx,
+ segmentObject(pcl::index_t picked_idx,
const CloudConstPtr& cloud,
const PointIndices::Ptr& plane_indices,
const PointIndices::Ptr& plane_boundary_indices,
for (int p_it = 0; p_it < static_cast<int>(indices_fullset_.size()); ++p_it)
indices_fullset_[p_it] = p_it;
}
- std::vector<int> indices_subset = plane_indices->indices;
+ pcl::Indices indices_subset = plane_indices->indices;
std::sort(indices_subset.begin(), indices_subset.end());
set_difference(indices_fullset_.begin(),
indices_fullset_.end(),
l.label = 0;
PointCloud<Label>::Ptr scene(new PointCloud<Label>(cloud->width, cloud->height, l));
// Mask the objects that we want to split into clusters
- for (const int& index : points_above_plane->indices)
+ for (const auto& index : points_above_plane->indices)
(*scene)[index].label = 1;
euclidean_cluster_comparator->setLabels(scene);
/////////////////////////////////////////////////////////////////////////
void
segment(const PointT& picked_point,
- int picked_idx,
+ pcl::index_t picked_idx,
PlanarRegion<PointT>& region,
PointIndices&,
CloudPtr& object)
if (idx == -1)
return;
- std::vector<int> indices(1);
+ pcl::Indices indices(1);
std::vector<float> distances(1);
// Use mutices to make sure we get the right cloud
event.getPoint(picked_pt.x, picked_pt.y, picked_pt.z);
// Add a sphere to it in the PCLVisualizer window
- std::stringstream ss;
- ss << "sphere_" << idx;
- cloud_viewer_.addSphere(picked_pt, 0.01, 1.0, 0.0, 0.0, ss.str());
+ const std::string sphere_name = "sphere_" + std::to_string(idx);
+ cloud_viewer_.addSphere(picked_pt, 0.01, 1.0, 0.0, 0.0, sphere_name);
// Check to see if we have access to the actual cloud data. Use the previously built
// search object.
// Compute the min/max of the object
PointT min_pt, max_pt;
getMinMax3D(*object, min_pt, max_pt);
- std::stringstream ss2;
- ss2 << "cube_" << idx;
+ const std::string cube_name = "cube_" + std::to_string(idx);
// Visualize the bounding box in 3D...
cloud_viewer_.addCube(min_pt.x,
max_pt.x,
0.0,
1.0,
0.0,
- ss2.str());
+ cube_name);
cloud_viewer_.setShapeRenderingProperties(
- visualization::PCL_VISUALIZER_LINE_WIDTH, 10, ss2.str());
+ visualization::PCL_VISUALIZER_LINE_WIDTH, 10, cube_name);
// ...and 2D
image_viewer_.addRectangle(search_.getInputCloud(), *object);
bool first_frame_;
// Segmentation
- std::vector<int> indices_fullset_;
+ pcl::Indices indices_fullset_;
PointIndices::Ptr plane_indices_;
CloudPtr plane_;
IntegralImageNormalEstimation<PointT, Normal> ne_;
#define SHOW_FPS 1
#include <pcl/apps/timer.h>
-#include <pcl/common/angles.h>
#include <pcl/common/common.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/susan.h>
#include <pcl/visualization/image_viewer.h>
#include <pcl/visualization/pcl_visualizer.h>
std::string
getStrBool(bool state)
{
- std::stringstream ss;
- ss << state;
- return ss.str();
+ return state ? "1" : "0";
}
/////////////////////////////////////////////////////////////////////////
#define SHOW_FPS 1
#include <pcl/apps/timer.h>
-#include <pcl/common/common.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/io/openni_grabber.h>
image_viewer_.removeLayer(getStrBool(keypts));
std::vector<int> uv;
uv.reserve(keypoints_indices_->indices.size() * 2);
- for (const int& index : keypoints_indices_->indices) {
+ for (const auto& index : keypoints_indices_->indices) {
int u(index % cloud->width);
int v(index / cloud->width);
image_viewer_.markPoint(u,
#include <pcl/apps/vfh_nn_classifier.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/point_types.h>
int
main(int, char* argv[])
#include <pcl/filters/voxel_grid.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/surface/concave_hull.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/point_cloud.h>
#include <pcl/filters/passthrough.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/surface/convex_hull.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/point_cloud.h>
*/
#include <pcl/common/time.h>
-#include <pcl/console/parse.h>
#include <pcl/features/boundary.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/filters/approximate_voxel_grid.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
octree->addPointsFromInputCloud();
std::cerr << octree->getLeafCount() << " -- ";
- std::vector<int> newPointIdxVector;
+ pcl::Indices newPointIdxVector;
// get a vector of new points, which did not exist in previous buffer
octree->getPointIndicesFromNewVoxels(newPointIdxVector, noise_filter_);
filtered_cloud.reset(new pcl::PointCloud<pcl::PointXYZRGBA>(*cloud));
filtered_cloud->points.reserve(newPointIdxVector.size());
- for (const int& idx : newPointIdxVector)
+ for (const auto& idx : newPointIdxVector)
(*filtered_cloud)[idx].rgba = 255 << 16;
if (!viewer.wasStopped())
filtered_cloud->points.reserve(newPointIdxVector.size());
- for (const int& idx : newPointIdxVector)
+ for (const auto& idx : newPointIdxVector)
filtered_cloud->points.push_back((*cloud)[idx]);
if (!viewer.wasStopped())
#include <pcl/common/time.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/surface/organized_fast_mesh.h>
#include <pcl/visualization/pcl_visualizer.h> // for PCLVisualizer
#include <pcl/point_cloud.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
cloud_subsampled_.reset(new typename pcl::PointCloud<PointType>());
normals_.reset(new pcl::PointCloud<pcl::Normal>());
features_.reset(new pcl::PointCloud<pcl::FPFHSignature33>());
- feature_indices_.reset(new std::vector<int>());
+ feature_indices_.reset(new pcl::Indices());
feature_locations_.reset(new typename pcl::PointCloud<PointType>());
// Subsample input cloud
extract_indices_filter_.setIndices(feature_indices_);
extract_indices_filter_.filter(*feature_locations_);
- PCL_INFO("Persistent feature locations %zu\n", static_cast<std::size_t>(feature_locations_->size()));
+ PCL_INFO("Persistent feature locations %zu\n",
+ static_cast<std::size_t>(feature_locations_->size()));
cloud_ = cloud;
saveCloud()
{
FPS_CALC("I/O");
- std::stringstream ss;
- ss << dir_name_ << "/" << file_name_ << "_"
- << boost::posix_time::to_iso_string(
- boost::posix_time::microsec_clock::local_time())
- << ".pcd";
+ const std::string time = boost::posix_time::to_iso_string(
+ boost::posix_time::microsec_clock::local_time());
+ const std::string filepath = dir_name_ + '/' + file_name_ + '_' + time + ".pcd";
if (format_ & 1) {
- writer_.writeBinary<PointType>(ss.str(), *cloud_);
+ writer_.writeBinary<PointType>(filepath, *cloud_);
// std::cerr << "Data saved in BINARY format to " << ss.str () << std::endl;
}
if (format_ & 2) {
- writer_.writeBinaryCompressed<PointType>(ss.str(), *cloud_);
+ writer_.writeBinaryCompressed<PointType>(filepath, *cloud_);
}
if (format_ & 4) {
- writer_.writeBinaryCompressed<PointType>(ss.str(), *cloud_);
+ writer_.writeBinaryCompressed<PointType>(filepath, *cloud_);
}
}
0, image->getWidth() - 1, 0, image->getHeight() - 1, 0, 0);
importer_->SetDataExtentToWholeExtent();
- std::stringstream ss;
- ss << "frame_" + time + "_rgb.tiff";
+ const std::string rgb_frame_filename = "frame_" + time + "_rgb.tiff";
importer_->SetImportVoidPointer(const_cast<void*>(data), 1);
importer_->Update();
flipper_->SetInputConnection(importer_->GetOutputPort());
flipper_->Update();
- writer_->SetFileName(ss.str().c_str());
+ writer_->SetFileName(rgb_frame_filename.c_str());
writer_->SetInputConnection(flipper_->GetOutputPort());
writer_->Write();
}
if (depth_image) {
- std::stringstream ss;
- ss << "frame_" + time + "_depth.tiff";
+ const std::string depth_frame_filename = "frame_" + time + "_depth.tiff";
depth_importer_->SetWholeExtent(
0, depth_image->getWidth() - 1, 0, depth_image->getHeight() - 1, 0, 0);
depth_importer_->Update();
flipper_->SetInputConnection(depth_importer_->GetOutputPort());
flipper_->Update();
- writer_->SetFileName(ss.str().c_str());
+ writer_->SetFileName(depth_frame_filename.c_str());
writer_->SetInputConnection(flipper_->GetOutputPort());
writer_->Write();
}
0, image->getWidth() - 1, 0, image->getHeight() - 1, 0, 0);
importer_->SetDataExtentToWholeExtent();
- std::stringstream ss;
- ss << "frame_" + time + "_rgb.tiff";
+ const std::string rgb_frame_filename = "frame_" + time + "_rgb.tiff";
importer_->SetImportVoidPointer(const_cast<void*>(data), 1);
importer_->Update();
flipper_->SetInputConnection(importer_->GetOutputPort());
flipper_->Update();
- writer_->SetFileName(ss.str().c_str());
+ writer_->SetFileName(rgb_frame_filename.c_str());
writer_->SetInputConnection(flipper_->GetOutputPort());
writer_->Write();
- std::cout << "writing rgb frame: " << ss.str() << std::endl;
+ std::cout << "writing rgb frame: " << rgb_frame_filename << std::endl;
}
}
depth_image_viewer_.addRGBImage(
depth_data, depth_image->getWidth(), depth_image->getHeight());
if (continuous_ || trigger_) {
- std::stringstream ss;
- ss << "frame_" + time + "_depth.tiff";
+ const std::string depth_frame_filename = "frame_" + time + "_depth.tiff";
depth_importer_->SetWholeExtent(
0, depth_image->getWidth() - 1, 0, depth_image->getHeight() - 1, 0, 0);
depth_importer_->Update();
flipper_->SetInputConnection(depth_importer_->GetOutputPort());
flipper_->Update();
- writer_->SetFileName(ss.str().c_str());
+ writer_->SetFileName(depth_frame_filename.c_str());
writer_->SetInputConnection(flipper_->GetOutputPort());
writer_->Write();
- std::cout << "writing depth frame: " << ss.str() << std::endl;
+ std::cout << "writing depth frame: " << depth_frame_filename << std::endl;
}
}
trigger_ = false;
*/
#include <pcl/common/time.h>
-#include <pcl/console/parse.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/harris_2d.h>
#include <pcl/tracking/pyramidal_klt.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/image_viewer.h>
+#include <boost/date_time/posix_time/posix_time.hpp> // for to_iso_string, local_time
+
#include <mutex>
#define SHOW_FPS 1
if (tracker_->getInitialized() && cloud_) {
if (points_mutex_.try_lock()) {
keypoints_ = tracker_->getTrackedPoints();
- points_status_ = tracker_->getPointsToTrackStatus();
+ points_status_ = tracker_->getStatusOfPointsToTrack();
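+ // The tracker API renamed getPointsToTrackStatus(); the status is now returned as a
+ // shared std::vector<int> that is indexed directly (see the member declaration below).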
points_mutex_.unlock();
}
std::vector<float> markers;
markers.reserve(keypoints_->size() * 2);
for (std::size_t i = 0; i < keypoints_->size(); ++i) {
- if (points_status_->indices[i] < 0)
+ if ((*points_status_)[i] < 0)
continue;
const pcl::PointUV& uv = (*keypoints_)[i];
markers.push_back(uv.u);
typename pcl::tracking::PyramidalKLTTracker<PointType>::Ptr tracker_;
pcl::PointCloud<pcl::PointUV>::ConstPtr keypoints_;
pcl::PointIndicesConstPtr points_;
- pcl::PointIndicesConstPtr points_status_;
+ pcl::shared_ptr<const std::vector<int>> points_status_;
int counter_;
};
#include <pcl/console/parse.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
+#include <pcl/search/kdtree.h> // for KdTree
#include <pcl/surface/mls.h>
-#include <pcl/visualization/cloud_viewer.h>
+#include <pcl/visualization/keyboard_event.h> // for KeyboardEvent
+#include <pcl/visualization/pcl_visualizer.h> // for PCLVisualizer
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <pcl/search/kdtree.h> // for KdTree
#include <mutex>
#include <pcl/common/time.h>
#include <pcl/console/parse.h>
#include <pcl/filters/voxel_grid.h>
-#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/point_cloud.h>
if (!bServerFileMode) {
if (bEnDecode) {
// ENCODING
- ofstream compressedPCFile;
- compressedPCFile.open(fileName.c_str(), std::ios::out | std::ios::trunc | std::ios::binary);
+ std::ofstream compressedPCFile;
+ compressedPCFile.open(fileName.c_str(),
+ std::ios::out | std::ios::trunc | std::ios::binary);
if (!bShowInputCloud) {
EventHelper v(compressedPCFile, octreeCoder, field_name, min_v, max_v);
if (bEnDecode) {
// ENCODING
std::ofstream compressedPCFile;
- compressedPCFile.open(fileName.c_str(), std::ios::out | std::ios::trunc | std::ios::binary);
+ compressedPCFile.open(fileName.c_str(),
+ std::ios::out | std::ios::trunc | std::ios::binary);
if (!bShowInputCloud) {
EventHelper v(compressedPCFile,
#include <pcl/common/time.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/features/organized_edge_detection.h>
-#include <pcl/io/io.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/common/time.h>
#include <pcl/features/integral_image_normal.h>
-#include <pcl/features/normal_3d.h>
-#include <pcl/filters/extract_indices.h>
-#include <pcl/io/io.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/segmentation/organized_connected_component_segmentation.h>
#include <pcl/segmentation/organized_multi_plane_segmentation.h>
#include <pcl/segmentation/planar_region.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/ModelCoefficients.h>
#include <mutex>
*/
#include <pcl/apps/openni_passthrough.h>
-#include <pcl/console/parse.h>
#include <QApplication>
#include <QEvent>
#include <QMutexLocker>
#include <QObject>
+#include <ui_openni_passthrough.h>
+#include <vtkGenericOpenGLRenderWindow.h>
#include <vtkRenderWindow.h>
+#include <vtkRendererCollection.h>
#include <thread>
ui_->setupUi(this);
this->setWindowTitle("PCL OpenNI PassThrough Viewer");
+ // Create the QVTKWidget
+#if VTK_MAJOR_VERSION > 8
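+ // With VTK 9 the Qt widget is OpenGL-native, so the renderer and a
+ // vtkGenericOpenGLRenderWindow are created here and handed to PCLVisualizer instead of
+ // letting it create its own render window.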
+ auto renderer = vtkSmartPointer<vtkRenderer>::New();
+ auto renderWindow = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
+ renderWindow->AddRenderer(renderer);
+ vis_.reset(new pcl::visualization::PCLVisualizer(renderer, renderWindow, "", false));
+#else
vis_.reset(new pcl::visualization::PCLVisualizer("", false));
- ui_->qvtk_widget->SetRenderWindow(vis_->getRenderWindow());
- vis_->setupInteractor(ui_->qvtk_widget->GetInteractor(),
- ui_->qvtk_widget->GetRenderWindow());
+#endif // VTK_MAJOR_VERSION > 8
+ setRenderWindowCompat(*(ui_->qvtk_widget), *(vis_->getRenderWindow()));
+ vis_->setupInteractor(getInteractorCompat(*(ui_->qvtk_widget)),
+ getRenderWindowCompat(*(ui_->qvtk_widget)));
+
vis_->getInteractorStyle()->setKeyboardModifier(
pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
- ui_->qvtk_widget->update();
+
+ refreshView();
// Start the OpenNI data acquision
std::function<void(const CloudConstPtr&)> f = [this](const CloudConstPtr& cloud) {
ui_->qvtk_widget->update();
}
+void
+OpenNIPassthrough::refreshView()
+{
+#if VTK_MAJOR_VERSION > 8
+ ui_->qvtk_widget->renderWindow()->Render();
+#else
+ ui_->qvtk_widget->update();
+#endif // VTK_MAJOR_VERSION > 8
+}
+
int
main(int argc, char** argv)
{
</property>
<layout class="QGridLayout" name="gridLayout">
<item row="0" column="0">
- <widget class="QVTKWidget" name="qvtk_widget">
+ <widget class="PCLQVTKWidget" name="qvtk_widget">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>255</horstretch>
</widget>
<customwidgets>
<customwidget>
- <class>QVTKWidget</class>
- <extends>QWidget</extends>
- <header>QVTKWidget.h</header>
+ <class>PCLQVTKWidget</class>
+ <extends>QOpenGLWidget</extends>
+ <header location="global">pcl/visualization/qvtk_compatibility.h</header>
</customwidget>
</customwidgets>
<resources>
*/
#include <pcl/common/time.h>
-#include <pcl/console/parse.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/point_cloud.h>
#include <boost/asio.hpp>
-#include <cstdio>
-#include <cstdlib>
#include <iostream>
-#include <sstream>
-#include <string>
#include <thread>
#include <vector>
std::size_t cloud_size = width_arg * height_arg;
// Reset point cloud
- cloud_arg.points.clear();
- cloud_arg.points.reserve(cloud_size);
+ cloud_arg.clear();
+ cloud_arg.reserve(cloud_size);
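+ // clear()/reserve() act on the underlying point storage; clear() also resets width and
+ // height, which are re-assigned below.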
// Define point cloud parameters
cloud_arg.width = static_cast<std::uint32_t>(width_arg);
}
// Add point to cloud
- cloud_arg.points.push_back(newPoint);
+ cloud_arg.push_back(newPoint);
// Increment point iterator
++i;
}
#include <pcl/common/time.h>
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>
-#include <pcl/features/integral_image_normal.h>
-#include <pcl/features/normal_3d.h>
#include <pcl/features/normal_3d_omp.h>
#include <pcl/filters/approximate_voxel_grid.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/project_inliers.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/search/pcl_search.h>
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/segmentation/extract_polygonal_prism_data.h>
#include <pcl/segmentation/sac_segmentation.h>
result.width = cloud->width;
result.height = cloud->height;
result.is_dense = cloud->is_dense;
- for (std::size_t i = 0; i < cloud->size(); i++) {
+ for (const auto& pt : *cloud) {
RefPointType point;
- point.x = (*cloud)[i].x;
- point.y = (*cloud)[i].y;
- point.z = (*cloud)[i].z;
- point.rgba = (*cloud)[i].rgba;
- result.points.push_back(point);
+ point.x = pt.x;
+ point.y = pt.y;
+ point.z = pt.z;
+ point.rgba = pt.rgba;
+ result.push_back(point);
}
}
void
removeZeroPoints(const CloudConstPtr& cloud, Cloud& result)
{
- for (const auto& point: *cloud) {
+ for (const auto& point : *cloud) {
if (!(std::abs(point.x) < 0.01 && std::abs(point.y) < 0.01 &&
std::abs(point.z) < 0.01) &&
!std::isnan(point.x) && !std::isnan(point.y) && !std::isnan(point.z))
- result.points.push_back(point);
+ result.push_back(point);
}
result.width = result.size();
Cloud& result)
{
pcl::PointIndices segmented_indices = cluster_indices[segment_index];
- for (const int& index : segmented_indices.indices) {
+ for (const auto& index : segmented_indices.indices) {
PointType point = (*cloud)[index];
- result.points.push_back(point);
+ result.push_back(point);
}
result.width = result.size();
result.height = 1;
#include <pcl/common/time.h>
#include <pcl/console/parse.h>
-#include <pcl/filters/approximate_voxel_grid.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/apps/organized_segmentation_demo.h>
#include <pcl/common/angles.h>
+#include <pcl/io/openni_grabber.h> // for OpenNIGrabber
#include <pcl/segmentation/extract_polygonal_prism_data.h>
-#include <pcl/surface/convex_hull.h>
#include <pcl/memory.h> // for pcl::dynamic_pointer_cast
#include <boost/signals2/connection.hpp> // for boost::signals2::connection
#include <QMutexLocker>
#include <QObject>
+#include <vtkGenericOpenGLRenderWindow.h>
#include <vtkRenderWindow.h>
+#include <vtkRendererCollection.h>
// #include <boost/filesystem.hpp> // for boost::filesystem::directory_iterator
#include <boost/signals2/connection.hpp> // for boost::signals2::connection
ui_->setupUi(this);
this->setWindowTitle("PCL Organized Connected Component Segmentation Demo");
+
+#if VTK_MAJOR_VERSION > 8
+ auto renderer = vtkSmartPointer<vtkRenderer>::New();
+ auto renderWindow = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
+ renderWindow->AddRenderer(renderer);
+ vis_.reset(new pcl::visualization::PCLVisualizer(renderer, renderWindow, "", false));
+#else
vis_.reset(new pcl::visualization::PCLVisualizer("", false));
- ui_->qvtk_widget->SetRenderWindow(vis_->getRenderWindow());
- vis_->setupInteractor(ui_->qvtk_widget->GetInteractor(),
- ui_->qvtk_widget->GetRenderWindow());
+#endif // VTK_MAJOR_VERSION > 8
+ setRenderWindowCompat(*(ui_->qvtk_widget), *(vis_->getRenderWindow()));
+ vis_->setupInteractor(getInteractorCompat(*(ui_->qvtk_widget)),
+ getRenderWindowCompat(*(ui_->qvtk_widget)));
+
+ refreshView();
+
vis_->getInteractorStyle()->setKeyboardModifier(
pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
- ui_->qvtk_widget->update();
std::function<void(const CloudConstPtr&)> f = [this](const CloudConstPtr& cloud) {
cloud_cb(cloud);
grabber_.start();
}
+void
+OrganizedSegmentationDemo::refreshView()
+{
+#if VTK_MAJOR_VERSION > 8
+ ui_->qvtk_widget->renderWindow()->Render();
+#else
+ ui_->qvtk_widget->update();
+#endif // VTK_MAJOR_VERSION > 8
+}
+
void
OrganizedSegmentationDemo::cloud_cb(const CloudConstPtr& cloud)
{
data_modified_ = false;
}
}
-
- ui_->qvtk_widget->update();
+ refreshView();
}
void
</property>
<layout class="QGridLayout" name="gridLayout">
<item row="0" column="3">
- <widget class="QVTKWidget" name="qvtk_widget">
+ <widget class="PCLQVTKWidget" name="qvtk_widget">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>255</horstretch>
</widget>
<customwidgets>
<customwidget>
- <class>QVTKWidget</class>
- <extends>QWidget</extends>
- <header>QVTKWidget.h</header>
+ <class>PCLQVTKWidget</class>
+ <extends>QOpenGLWidget</extends>
+ <header location="global">pcl/visualization/qvtk_compatibility.h</header>
</customwidget>
</customwidgets>
<resources>
#include <pcl/console/parse.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/geometry/polygon_operations.h>
-#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/segmentation/organized_connected_component_segmentation.h>
#include <pcl/segmentation/organized_multi_plane_segmentation.h>
"approx_plane_%02zu_%03zu",
static_cast<std::size_t>(i),
static_cast<std::size_t>(idx));
- viewer.addLine(
- (*approx_contour)[idx],
- (*approx_contour)[(idx + 1) % approx_contour->size()],
- 0.5 * red[i],
- 0.5 * grn[i],
- 0.5 * blu[i],
- name);
+ viewer.addLine((*approx_contour)[idx],
+ (*approx_contour)[(idx + 1) % approx_contour->size()],
+ 0.5 * red[i],
+ 0.5 * grn[i],
+ 0.5 * blu[i],
+ name);
}
}
}
#include <pcl/filters/project_inliers.h>
#include <pcl/geometry/polygon_operations.h>
#include <pcl/io/pcd_io.h>
+#include <pcl/sample_consensus/sac_model_plane.h> // for pointToPlaneDistance
#include <pcl/segmentation/edge_aware_plane_comparator.h>
#include <pcl/segmentation/euclidean_cluster_comparator.h>
#include <pcl/segmentation/extract_clusters.h>
* \param[out] object the segmented resultant object
*/
void
- segmentObject(int picked_idx,
+ segmentObject(pcl::index_t picked_idx,
const typename PointCloud<PointT>::ConstPtr& cloud,
const PointIndices::Ptr& plane_indices,
PointCloud<PointT>& object)
exppd.setInputCloud(cloud);
exppd.setIndices(indices_but_the_plane);
exppd.setInputPlanarHull(plane_hull);
- exppd.setViewPoint((*cloud)[picked_idx].x,
- (*cloud)[picked_idx].y,
- (*cloud)[picked_idx].z);
+ exppd.setViewPoint(
+ (*cloud)[picked_idx].x, (*cloud)[picked_idx].y, (*cloud)[picked_idx].z);
exppd.setHeightLimits(0.001, 0.5); // up to half a meter
exppd.segment(*points_above_plane);
PointCloud<Label>::Ptr scene(
new PointCloud<Label>(cloud->width, cloud->height, l));
// Mask the objects that we want to split into clusters
- for (const int& index : points_above_plane->indices)
+ for (const auto& index : points_above_plane->indices)
(*scene)[index].label = 1;
euclidean_cluster_comparator->setLabels(scene);
/////////////////////////////////////////////////////////////////////////
void
segment(const PointT& picked_point,
- int picked_idx,
+ pcl::index_t picked_idx,
PlanarRegion<PointT>& region,
typename PointCloud<PointT>::Ptr& object)
{
if (idx == -1)
return;
- std::vector<int> indices(1);
+ pcl::Indices indices(1);
std::vector<float> distances(1);
// Get the point that was picked
picked_pt.z);
// Add a sphere to it in the PCLVisualizer window
- std::stringstream ss;
- ss << "sphere_" << idx;
- cloud_viewer_->addSphere(picked_pt, 0.01, 1.0, 0.0, 0.0, ss.str());
+ const std::string sphere_name = "sphere_" + std::to_string(idx);
+ cloud_viewer_->addSphere(picked_pt, 0.01, 1.0, 0.0, 0.0, sphere_name);
// Because VTK/OpenGL stores data without NaN, we lose the 1-1 correspondence, so we
// must search for the real point
#include <QMutexLocker>
#include <QObject>
#include <QRadioButton>
+#include <ui_pcd_video_player.h>
#include <vtkCamera.h>
+#include <vtkGenericOpenGLRenderWindow.h>
#include <vtkRenderWindow.h>
#include <vtkRendererCollection.h>
// Setup the cloud pointer
cloud_.reset(new pcl::PointCloud<pcl::PointXYZRGBA>);
- // Set up the qvtk window
+ // Create the QVTKWidget
+#if VTK_MAJOR_VERSION > 8
+ auto renderer = vtkSmartPointer<vtkRenderer>::New();
+ auto renderWindow = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
+ renderWindow->AddRenderer(renderer);
+ vis_.reset(new pcl::visualization::PCLVisualizer(renderer, renderWindow, "", false));
+#else
vis_.reset(new pcl::visualization::PCLVisualizer("", false));
- ui_->qvtkWidget->SetRenderWindow(vis_->getRenderWindow());
- vis_->setupInteractor(ui_->qvtkWidget->GetInteractor(),
- ui_->qvtkWidget->GetRenderWindow());
+#endif // VTK_MAJOR_VERSION > 8
+ setRenderWindowCompat(*(ui_->qvtk_widget), *(vis_->getRenderWindow()));
+ vis_->setupInteractor(getInteractorCompat(*(ui_->qvtk_widget)),
+ getRenderWindowCompat(*(ui_->qvtk_widget)));
+
vis_->getInteractorStyle()->setKeyboardModifier(
pcl::visualization::INTERACTOR_KB_MOD_SHIFT);
- ui_->qvtkWidget->update();
+
+ refreshView();
// Connect all buttons
connect(ui_->playButton, SIGNAL(clicked()), this, SLOT(playButtonPressed()));
}
cloud_modified_ = false;
}
- ui_->qvtkWidget->update();
+
+ refreshView();
}
void
cloud_modified_ = true;
}
+void
+PCDVideoPlayer::refreshView()
+{
+#if VTK_MAJOR_VERSION > 8
+ ui_->qvtk_widget->renderWindow()->Render();
+#else
+ ui_->qvtk_widget->update();
+#endif // VTK_MAJOR_VERSION > 8
+}
+
void
print_usage()
{
<string>MainWindow</string>
</property>
<widget class="QWidget" name="centralwidget">
- <widget class="QWidget" name="verticalLayoutWidget">
- <property name="geometry">
- <rect>
- <x>339</x>
- <y>10</y>
- <width>451</width>
- <height>531</height>
- </rect>
- </property>
- <layout class="QVBoxLayout" name="verticalLayout">
- <item>
- <widget class="QVTKWidget" name="qvtkWidget"/>
- </item>
- <item>
- <widget class="QSlider" name="indexSlider">
- <property name="orientation">
- <enum>Qt::Horizontal</enum>
- </property>
- </widget>
- </item>
- </layout>
- </widget>
- <widget class="QWidget" name="verticalLayoutWidget_2">
- <property name="geometry">
- <rect>
- <x>9</x>
- <y>9</y>
- <width>321</width>
- <height>531</height>
- </rect>
- </property>
- <layout class="QVBoxLayout" name="verticalLayout_2">
- <item>
- <widget class="QPushButton" name="selectFilesButton">
- <property name="text">
- <string>Select PCD Files</string>
- </property>
- </widget>
- </item>
- <item>
- <widget class="QPushButton" name="selectFolderButton">
- <property name="text">
- <string>Select Folder</string>
- </property>
- </widget>
- </item>
- <item>
- <widget class="Line" name="line_2">
- <property name="orientation">
- <enum>Qt::Horizontal</enum>
- </property>
- </widget>
- </item>
- <item>
- <spacer name="verticalSpacer">
- <property name="orientation">
- <enum>Qt::Vertical</enum>
- </property>
- <property name="sizeHint" stdset="0">
- <size>
- <width>20</width>
- <height>40</height>
- </size>
- </property>
- </spacer>
- </item>
- <item>
- <widget class="Line" name="line_3">
- <property name="orientation">
- <enum>Qt::Horizontal</enum>
- </property>
- </widget>
- </item>
- <item>
- <layout class="QHBoxLayout" name="horizontalLayout">
- <item>
- <widget class="QToolButton" name="backButton">
- <property name="text">
- <string>...</string>
- </property>
- <property name="arrowType">
- <enum>Qt::LeftArrow</enum>
- </property>
- </widget>
- </item>
- <item>
- <widget class="QToolButton" name="stopButton">
- <property name="text">
- <string>Stop</string>
- </property>
- </widget>
- </item>
- <item>
- <widget class="QToolButton" name="playButton">
- <property name="text">
- <string>Play</string>
- </property>
- <property name="arrowType">
- <enum>Qt::NoArrow</enum>
- </property>
- </widget>
- </item>
- <item>
- <widget class="QToolButton" name="nextButton">
- <property name="text">
- <string>...</string>
- </property>
- <property name="arrowType">
- <enum>Qt::RightArrow</enum>
- </property>
- </widget>
- </item>
- </layout>
- </item>
- </layout>
- </widget>
+ <layout class="QHBoxLayout" name="horizontalLayout_2">
+ <item>
+ <layout class="QVBoxLayout" name="verticalLayout_2">
+ <item>
+ <widget class="QPushButton" name="selectFilesButton">
+ <property name="text">
+ <string>Select PCD Files</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QPushButton" name="selectFolderButton">
+ <property name="text">
+ <string>Select Folder</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="Line" name="line_2">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <spacer name="verticalSpacer">
+ <property name="orientation">
+ <enum>Qt::Vertical</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>20</width>
+ <height>40</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ <item>
+ <widget class="Line" name="line_3">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <layout class="QHBoxLayout" name="horizontalLayout">
+ <item>
+ <widget class="QToolButton" name="backButton">
+ <property name="text">
+ <string>...</string>
+ </property>
+ <property name="arrowType">
+ <enum>Qt::LeftArrow</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QToolButton" name="stopButton">
+ <property name="text">
+ <string>Stop</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QToolButton" name="playButton">
+ <property name="text">
+ <string>Play</string>
+ </property>
+ <property name="arrowType">
+ <enum>Qt::NoArrow</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QToolButton" name="nextButton">
+ <property name="text">
+ <string>...</string>
+ </property>
+ <property name="arrowType">
+ <enum>Qt::RightArrow</enum>
+ </property>
+ </widget>
+ </item>
+ </layout>
+ </item>
+ </layout>
+ </item>
+ <item>
+ <layout class="QVBoxLayout" name="verticalLayout">
+ <item>
+ <widget class="PCLQVTKWidget" name="qvtk_widget"/>
+ </item>
+ <item>
+ <widget class="QSlider" name="indexSlider">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ </widget>
+ </item>
+ </layout>
+ </item>
+ </layout>
</widget>
<widget class="QMenuBar" name="menubar">
<property name="geometry">
<x>0</x>
<y>0</y>
<width>800</width>
- <height>25</height>
+ <height>21</height>
</rect>
</property>
</widget>
</widget>
<customwidgets>
<customwidget>
- <class>QVTKWidget</class>
- <extends>QWidget</extends>
- <header>QVTKWidget.h</header>
+ <class>PCLQVTKWidget</class>
+ <extends>QOpenGLWidget</extends>
+ <header location="global">pcl/visualization/qvtk_compatibility.h</header>
</customwidget>
</customwidgets>
<resources/>
while (cloud_scene->size() > 0.3 * nr_points) {
seg.setInputCloud(cloud_scene);
seg.segment(*inliers, *coefficients);
- PCL_INFO("Plane inliers: %zu\n",
- static_cast<std::size_t>(inliers->indices.size()));
+ PCL_INFO("Plane inliers: %zu\n", static_cast<std::size_t>(inliers->indices.size()));
if (inliers->indices.size() < 50000)
break;
pcl::transformPointCloud(
*cloud_models[model_i], *cloud_output, final_transformation);
- std::stringstream ss;
- ss << "model_" << model_i;
+ const std::string model_name = "model_" + std::to_string(model_i);
visualization::PointCloudColorHandlerRandom<PointXYZ> random_color(
cloud_output->makeShared());
- viewer.addPointCloud(cloud_output, random_color, ss.str());
- PCL_INFO("Showing model %s\n", ss.str().c_str());
+ viewer.addPointCloud(cloud_output, random_color, model_name);
+ PCL_INFO("Showing model %s\n", model_name.c_str());
}
PCL_INFO("All models have been registered!\n");
*/
#include <pcl/apps/render_views_tesselated_sphere.h>
+#include <pcl/visualization/vtk/pcl_vtk_compatibility.h>
#include <pcl/point_types.h>
#include <vtkActor.h>
#include <vtkCellArray.h>
#include <vtkCellData.h>
#include <vtkHardwareSelector.h>
+#include <vtkIdTypeArray.h>
#include <vtkLoopSubdivisionFilter.h>
#include <vtkPlatonicSolidSource.h>
#include <vtkPointPicker.h>
{
// center object
double CoM[3];
- vtkIdType npts_com = 0, *ptIds_com = nullptr;
+ vtkIdType npts_com = 0;
+ vtkCellPtsPtr ptIds_com = nullptr;
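+ // vtkCellPtsPtr comes from pcl_vtk_compatibility.h: vtkIdType const* on VTK >= 9 and
+ // vtkIdType* on older VTK, matching the changed vtkCellArray::GetNextCell() signature.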
vtkSmartPointer<vtkCellArray> cells_com = polydata_->GetPolys();
double center[3], p1_com[3], p2_com[3], p3_com[3], totalArea_com = 0;
// * Compute area of the mesh
//////////////////////////////
vtkSmartPointer<vtkCellArray> cells = mapper->GetInput()->GetPolys();
- vtkIdType npts = 0, *ptIds = nullptr;
+ vtkIdType npts = 0;
+ vtkCellPtsPtr ptIds = nullptr;
double p1[3], p2[3], p3[3], totalArea = 0;
for (cells->InitTraversal(); cells->GetNextCell(npts, ptIds);) {
polydata->BuildCells();
vtkSmartPointer<vtkCellArray> cells = polydata->GetPolys();
- vtkIdType npts = 0, *ptIds = nullptr;
+ vtkIdType npts = 0;
+ vtkCellPtsPtr ptIds = nullptr;
double p1[3], p2[3], p3[3], area, totalArea = 0;
for (cells->InitTraversal(); cells->GetNextCell(npts, ptIds);) {
#include <pcl/common/centroid.h> // for computeMeanAndCovarianceMatrix
#include <pcl/common/distances.h>
#include <pcl/common/intersections.h>
-#include <pcl/common/time.h>
#include <pcl/features/integral_image_normal.h>
-#include <pcl/io/io.h>
-#include <pcl/io/openni_grabber.h>
-#include <pcl/io/pcd_grabber.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/io/ply_io.h>
-#include <pcl/io/png_io.h>
#include <pcl/sample_consensus/sac_model_plane.h>
#include <pcl/segmentation/euclidean_cluster_comparator.h>
#include <pcl/segmentation/ground_plane_comparator.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/ModelCoefficients.h>
+#include <boost/filesystem.hpp> // for directory_iterator
+
#include <mutex>
using PointT = pcl::PointXYZRGB;
// note the NAN points in the image as well
for (std::size_t i = 0; i < cloud->size(); i++) {
if (!pcl::isFinite((*cloud)[i])) {
- (*ground_image)[i].b =
- static_cast<std::uint8_t>(((*cloud)[i].b + 255) / 2);
+ (*ground_image)[i].b = static_cast<std::uint8_t>(((*cloud)[i].b + 255) / 2);
(*label_image)[i].r = 0;
(*label_image)[i].g = 0;
(*label_image)[i].b = 255;
query.y = 0.5;
query.z = 0.5;
- std::vector<int> kd_indices;
+ pcl::Indices kd_indices;
std::vector<float> kd_distances;
- std::vector<int> bf_indices;
+ pcl::Indices bf_indices;
std::vector<float> bf_distances;
double start, stop;
--- /dev/null
+set(SUBSYS_NAME benchmarks)
+set(SUBSYS_DESC "Point cloud library benchmarks")
+set(SUBSYS_DEPS common filters features search kdtree io)
+set(DEFAULT OFF)
+set(build TRUE)
+set(REASON "Disabled by default")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS})
+if(NOT build)
+ return()
+endif()
+
+find_package(benchmark REQUIRED)
+add_custom_target(run_benchmarks)
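+# Each PCL_ADD_BENCHMARK() call below is expected to build a Google Benchmark executable,
+# link it against the listed PCL libraries, pass the given PCD files as arguments and hook
+# it into the aggregate run_benchmarks target.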
+
+PCL_ADD_BENCHMARK(features_normal_3d FILES features/normal_3d.cpp
+ LINK_WITH pcl_io pcl_search pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd"
+ "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
+
+PCL_ADD_BENCHMARK(filters_voxel_grid FILES filters/voxel_grid.cpp
+ LINK_WITH pcl_io pcl_filters
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd"
+ "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
+
--- /dev/null
+#include <pcl/features/normal_3d.h> // for NormalEstimation
+#include <pcl/features/normal_3d_omp.h> // for NormalEstimationOMP
+#include <pcl/io/pcd_io.h> // for PCDReader
+
+#include <benchmark/benchmark.h>
+
+static void
+BM_NormalEstimation(benchmark::State& state, const std::string& file)
+{
+ // Perform setup here
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PCDReader reader;
+ reader.read(file, *cloud);
+ pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
+ ne.setInputCloud(cloud);
+ ne.setKSearch(state.range(0));
+ pcl::PointCloud<pcl::Normal>::Ptr cloud_normals(new pcl::PointCloud<pcl::Normal>);
+ for (auto _ : state) {
+ // This code gets timed
+ ne.compute(*cloud_normals);
+ }
+}
+
+#ifdef _OPENMP
+static void
+BM_NormalEstimationOMP(benchmark::State& state, const std::string& file)
+{
+ // Perform setup here
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PCDReader reader;
+ reader.read(file, *cloud);
+ pcl::NormalEstimationOMP<pcl::PointXYZ, pcl::Normal> ne;
+ ne.setInputCloud(cloud);
+ ne.setKSearch(100);
+ pcl::PointCloud<pcl::Normal>::Ptr cloud_normals(new pcl::PointCloud<pcl::Normal>);
+ for (auto _ : state) {
+ // This code gets timed
+ ne.compute(*cloud_normals);
+ }
+}
+#endif
+
+int
+main(int argc, char** argv)
+{
+ if (argc < 3) {
+ std::cerr
+ << "No test files given. Please download `table_scene_mug_stereo_textured.pcd` "
+ "and `milk_cartoon_all_small_clorox.pcd`, and pass their paths to the test."
+ << std::endl;
+ return (-1);
+ }
+ benchmark::RegisterBenchmark("BM_NormalEstimation_mug", &BM_NormalEstimation, argv[1])
+ ->Arg(50)
+ ->Arg(100)
+ ->Unit(benchmark::kMillisecond);
+ benchmark::RegisterBenchmark(
+ "BM_NormalEstimation_milk", &BM_NormalEstimation, argv[2])
+ ->Arg(50)
+ ->Arg(100)
+ ->Unit(benchmark::kMillisecond);
+#ifdef _OPENMP
+ benchmark::RegisterBenchmark(
+ "BM_NormalEstimationOMP", &BM_NormalEstimationOMP, argv[1])
+ ->Unit(benchmark::kMillisecond);
+#endif
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+}
--- /dev/null
+#include <pcl/filters/approximate_voxel_grid.h>
+#include <pcl/filters/voxel_grid.h>
+#include <pcl/io/pcd_io.h> // for PCDReader
+
+#include <benchmark/benchmark.h>
+
+static void
+BM_VoxelGrid(benchmark::State& state, const std::string& file)
+{
+ // Perform setup here
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PCDReader reader;
+ reader.read(file, *cloud);
+
+ pcl::VoxelGrid<pcl::PointXYZ> vg;
+ vg.setLeafSize(0.01, 0.01, 0.01);
+ vg.setInputCloud(cloud);
+
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_voxelized(
+ new pcl::PointCloud<pcl::PointXYZ>);
+ for (auto _ : state) {
+ // This code gets timed
+ vg.filter(*cloud_voxelized);
+ }
+}
+
+static void
+BM_ApproxVoxelGrid(benchmark::State& state, const std::string& file)
+{
+ // Perform setup here
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PCDReader reader;
+ reader.read(file, *cloud);
+
+ pcl::ApproximateVoxelGrid<pcl::PointXYZ> avg;
+ avg.setLeafSize(0.01, 0.01, 0.01);
+ avg.setInputCloud(cloud);
+
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_voxelized(
+ new pcl::PointCloud<pcl::PointXYZ>);
+ for (auto _ : state) {
+ // This code gets timed
+ avg.filter(*cloud_voxelized);
+ }
+}
+
+int
+main(int argc, char** argv)
+{
+ if (argc < 3) {
+ std::cerr
+ << "No test files given. Please download `table_scene_mug_stereo_textured.pcd` "
+ "and `milk_cartoon_all_small_clorox.pcd`, and pass their paths to the test."
+ << std::endl;
+ return (-1);
+ }
+
+ benchmark::RegisterBenchmark("BM_VoxelGrid_milk", &BM_VoxelGrid, argv[2])
+ ->Unit(benchmark::kMillisecond);
+ benchmark::RegisterBenchmark(
+ "BM_ApproximateVoxelGrid_milk", &BM_ApproxVoxelGrid, argv[2])
+ ->Unit(benchmark::kMillisecond);
+
+ benchmark::RegisterBenchmark("BM_VoxelGrid_mug", &BM_VoxelGrid, argv[1])
+ ->Unit(benchmark::kMillisecond);
+ benchmark::RegisterBenchmark(
+ "BM_ApproximateVoxelGrid_mug", &BM_ApproxVoxelGrid, argv[1])
+ ->Unit(benchmark::kMillisecond);
+
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+}
# First try to locate FLANN using modern config
find_package(flann NO_MODULE ${FLANN_FIND_VERSION} QUIET)
+
if(flann_FOUND)
unset(flann_FOUND)
set(FLANN_FOUND ON)
+
# Create interface library that effectively becomes an alias for the appropriate (static/dynamic) imported FLANN target
add_library(FLANN::FLANN INTERFACE IMPORTED)
- if(FLANN_USE_STATIC)
+
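+ # Both the static and shared imported FLANN targets may exist; honour
+ # PCL_FLANN_REQUIRED_TYPE and otherwise follow PCL_SHARED_LIBS.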
+ if(TARGET flann::flann_cpp_s AND TARGET flann::flann_cpp)
+ if(PCL_FLANN_REQUIRED_TYPE MATCHES "DONTCARE")
+ if(PCL_SHARED_LIBS)
+ set_property(TARGET FLANN::FLANN APPEND PROPERTY INTERFACE_LINK_LIBRARIES flann::flann_cpp)
+ set(FLANN_LIBRARY_TYPE SHARED)
+ else()
+ set_property(TARGET FLANN::FLANN APPEND PROPERTY INTERFACE_LINK_LIBRARIES flann::flann_cpp_s)
+ set(FLANN_LIBRARY_TYPE STATIC)
+ endif()
+ elseif(PCL_FLANN_REQUIRED_TYPE MATCHES "SHARED")
+ set_property(TARGET FLANN::FLANN APPEND PROPERTY INTERFACE_LINK_LIBRARIES flann::flann_cpp)
+ set(FLANN_LIBRARY_TYPE SHARED)
+ else()
+ set_property(TARGET FLANN::FLANN APPEND PROPERTY INTERFACE_LINK_LIBRARIES flann::flann_cpp_s)
+ set(FLANN_LIBRARY_TYPE STATIC)
+ endif()
+ elseif(TARGET flann::flann_cpp_s)
set_property(TARGET FLANN::FLANN APPEND PROPERTY INTERFACE_LINK_LIBRARIES flann::flann_cpp_s)
+ set(FLANN_LIBRARY_TYPE STATIC)
else()
set_property(TARGET FLANN::FLANN APPEND PROPERTY INTERFACE_LINK_LIBRARIES flann::flann_cpp)
+ set(FLANN_LIBRARY_TYPE SHARED)
endif()
+
# Determine FLANN installation root based on the path to the processed Config file
get_filename_component(_config_dir "${flann_CONFIG}" DIRECTORY)
get_filename_component(FLANN_ROOT "${_config_dir}/../../.." ABSOLUTE)
include
)
-if(FLANN_USE_STATIC)
- set(FLANN_RELEASE_NAME flann_cpp_s)
- set(FLANN_DEBUG_NAME flann_cpp_s-gd)
- set(FLANN_LIBRARY_TYPE STATIC)
-else()
- set(FLANN_RELEASE_NAME flann_cpp)
- set(FLANN_DEBUG_NAME flann_cpp-gd)
- set(FLANN_LIBRARY_TYPE SHARED)
-endif()
+find_library(FLANN_LIBRARY_SHARED
+ NAMES
+ flann_cpp
+ HINTS
+ ${PC_FLANN_LIBRARY_DIRS}
+ ${FLANN_ROOT}
+ $ENV{FLANN_ROOT}
+ PATHS
+ $ENV{PROGRAMFILES}/Flann
+ $ENV{PROGRAMW6432}/Flann
+ PATH_SUFFIXES
+ lib
+)
-find_library(FLANN_LIBRARY
+find_library(FLANN_LIBRARY_DEBUG_SHARED
NAMES
- ${FLANN_RELEASE_NAME}
+ flann_cpp-gd flann_cppd
HINTS
${PC_FLANN_LIBRARY_DIRS}
${FLANN_ROOT}
lib
)
-find_library(FLANN_LIBRARY_DEBUG
+find_library(FLANN_LIBRARY_STATIC
NAMES
- ${FLANN_DEBUG_NAME}
+ flann_cpp_s
HINTS
${PC_FLANN_LIBRARY_DIRS}
${FLANN_ROOT}
lib
)
+find_library(FLANN_LIBRARY_DEBUG_STATIC
+ NAMES
+ flann_cpp_s-gd flann_cpp_sd
+ HINTS
+ ${PC_FLANN_LIBRARY_DIRS}
+ ${FLANN_ROOT}
+ $ENV{FLANN_ROOT}
+ PATHS
+ $ENV{PROGRAMFILES}/Flann
+ $ENV{PROGRAMW6432}/Flann
+ PATH_SUFFIXES
+ lib
+)
+
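+# When both flavours were found via find_library, pick the one requested through
+# PCL_FLANN_REQUIRED_TYPE (defaulting to the flavour that matches PCL_SHARED_LIBS).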
+if(FLANN_LIBRARY_SHARED AND FLANN_LIBRARY_STATIC)
+ if(PCL_FLANN_REQUIRED_TYPE MATCHES "DONTCARE")
+ if(PCL_SHARED_LIBS)
+ set(FLANN_LIBRARY_TYPE SHARED)
+ set(FLANN_LIBRARY ${FLANN_LIBRARY_SHARED})
+ else()
+ set(FLANN_LIBRARY_TYPE STATIC)
+ set(FLANN_LIBRARY ${FLANN_LIBRARY_STATIC})
+ endif()
+ elseif(PCL_FLANN_REQUIRED_TYPE MATCHES "SHARED")
+ set(FLANN_LIBRARY_TYPE SHARED)
+ set(FLANN_LIBRARY ${FLANN_LIBRARY_SHARED})
+ else()
+ set(FLANN_LIBRARY_TYPE STATIC)
+ set(FLANN_LIBRARY ${FLANN_LIBRARY_STATIC})
+ endif()
+elseif(FLANN_LIBRARY_STATIC)
+ set(FLANN_LIBRARY_TYPE STATIC)
+ set(FLANN_LIBRARY ${FLANN_LIBRARY_STATIC})
+elseif(FLANN_LIBRARY_SHARED)
+ set(FLANN_LIBRARY_TYPE SHARED)
+ set(FLANN_LIBRARY ${FLANN_LIBRARY_SHARED})
+endif()
+
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
FLANN DEFAULT_MSG
set_target_properties(FLANN::FLANN PROPERTIES INTERFACE_COMPILE_DEFINITIONS "${PC_FLANN_CFLAGS_OTHER}")
set_property(TARGET FLANN::FLANN APPEND PROPERTY IMPORTED_CONFIGURATIONS "RELEASE")
set_target_properties(FLANN::FLANN PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX")
- if(WIN32 AND NOT FLANN_USE_STATIC)
+ if(WIN32 AND (NOT FLANN_LIBRARY_TYPE MATCHES "STATIC"))
set_target_properties(FLANN::FLANN PROPERTIES IMPORTED_IMPLIB_RELEASE "${FLANN_LIBRARY}")
else()
set_target_properties(FLANN::FLANN PROPERTIES IMPORTED_LOCATION_RELEASE "${FLANN_LIBRARY}")
if(FLANN_LIBRARY_DEBUG)
set_property(TARGET FLANN::FLANN APPEND PROPERTY IMPORTED_CONFIGURATIONS "DEBUG")
set_target_properties(FLANN::FLANN PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES_DEBUG "CXX")
- if(WIN32 AND NOT FLANN_USE_STATIC)
+ if(WIN32 AND (NOT FLANN_LIBRARY_TYPE MATCHES "STATIC"))
set_target_properties(FLANN::FLANN PROPERTIES IMPORTED_IMPLIB_DEBUG "${FLANN_LIBRARY_DEBUG}")
else()
set_target_properties(FLANN::FLANN PROPERTIES IMPORTED_LOCATION_DEBUG "${FLANN_LIBRARY_DEBUG}")
--- /dev/null
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+#.rst:
+# FindGLEW
+# --------
+#
+# Find the OpenGL Extension Wrangler Library (GLEW)
+#
+# IMPORTED Targets
+# ^^^^^^^^^^^^^^^^
+#
+# This module defines the :prop_tgt:`IMPORTED` target ``GLEW::GLEW``,
+# if GLEW has been found.
+#
+# Result Variables
+# ^^^^^^^^^^^^^^^^
+#
+# This module defines the following variables:
+#
+# ::
+#
+# GLEW_INCLUDE_DIRS - include directories for GLEW
+# GLEW_LIBRARIES - libraries to link against GLEW
+# GLEW_FOUND - true if GLEW has been found and can be used
+
+find_path(GLEW_INCLUDE_DIR GL/glew.h)
+
+if(NOT GLEW_LIBRARY)
+ find_library(GLEW_LIBRARY_RELEASE NAMES GLEW glew32 glew glew32s PATH_SUFFIXES lib64 libx32)
+ find_library(GLEW_LIBRARY_DEBUG NAMES GLEWd glew32d glewd PATH_SUFFIXES lib64)
+
+ include(SelectLibraryConfigurations)
+ select_library_configurations(GLEW)
+endif ()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(GLEW
+ REQUIRED_VARS GLEW_INCLUDE_DIR GLEW_LIBRARY)
+
+if(GLEW_FOUND)
+ set(GLEW_INCLUDE_DIRS ${GLEW_INCLUDE_DIR})
+
+ if(NOT GLEW_LIBRARIES)
+ set(GLEW_LIBRARIES ${GLEW_LIBRARY})
+ endif()
+
+ if (NOT TARGET GLEW::GLEW)
+ add_library(GLEW::GLEW UNKNOWN IMPORTED)
+ set_target_properties(GLEW::GLEW PROPERTIES
+ INTERFACE_INCLUDE_DIRECTORIES "${GLEW_INCLUDE_DIRS}")
+
+ if(GLEW_LIBRARY_RELEASE)
+ set_property(TARGET GLEW::GLEW APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+ set_target_properties(GLEW::GLEW PROPERTIES IMPORTED_LOCATION_RELEASE "${GLEW_LIBRARY_RELEASE}")
+ endif()
+
+ if(GLEW_LIBRARY_DEBUG)
+ set_property(TARGET GLEW::GLEW APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG)
+ set_target_properties(GLEW::GLEW PROPERTIES IMPORTED_LOCATION_DEBUG "${GLEW_LIBRARY_DEBUG}")
+ endif()
+
+ if(NOT GLEW_LIBRARY_RELEASE AND NOT GLEW_LIBRARY_DEBUG)
+ set_property(TARGET GLEW::GLEW APPEND PROPERTY IMPORTED_LOCATION "${GLEW_LIBRARY}")
+ endif()
+ endif()
+endif()
+
+mark_as_advanced(GLEW_INCLUDE_DIR)
\ No newline at end of file
# OPENNI_DEFINITIONS Compiler flags for OpenNI
find_package(PkgConfig QUIET)
-
-# Find LibUSB
-if(NOT WIN32)
- pkg_check_modules(PC_USB_10 libusb-1.0)
- find_path(USB_10_INCLUDE_DIR libusb-1.0/libusb.h
- HINTS ${PC_USB_10_INCLUDEDIR} ${PC_USB_10_INCLUDE_DIRS} "${USB_10_ROOT}" "$ENV{USB_10_ROOT}"
- PATH_SUFFIXES libusb-1.0)
-
- find_library(USB_10_LIBRARY
- NAMES usb-1.0
- HINTS ${PC_USB_10_LIBDIR} ${PC_USB_10_LIBRARY_DIRS} "${USB_10_ROOT}" "$ENV{USB_10_ROOT}"
- PATH_SUFFIXES lib)
-
- include(FindPackageHandleStandardArgs)
- find_package_handle_standard_args(USB_10 DEFAULT_MSG USB_10_LIBRARY USB_10_INCLUDE_DIR)
-
- if(NOT USB_10_FOUND)
- message(STATUS "OpenNI disabled because libusb-1.0 not found.")
- return()
- else()
- include_directories(SYSTEM ${USB_10_INCLUDE_DIR})
- endif()
-endif()
-
pkg_check_modules(PC_OPENNI QUIET libopenni)
set(OPENNI_DEFINITIONS ${PC_OPENNI_CFLAGS_OTHER})
# Libraries
if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
- set(OPENNI_LIBRARIES ${OPENNI_LIBRARY} ${LIBUSB_1_LIBRARIES})
+ find_package(libusb REQUIRED)
+ set(OPENNI_LIBRARIES ${OPENNI_LIBRARY} libusb::libusb)
else()
set(OPENNI_LIBRARIES ${OPENNI_LIBRARY})
endif()
endif()
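+# Parse the OpenNI version from XnVersion.h so it can be reported and checked via the
+# VERSION_VAR passed to find_package_handle_standard_args() below.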
+if(EXISTS "${OPENNI_INCLUDE_DIR}/XnVersion.h")
+ file(STRINGS "${OPENNI_INCLUDE_DIR}/XnVersion.h" _contents REGEX "^#define[ \t]+XN_[A-Z]+_VERSION[ \t]+[0-9]+")
+ if(_contents)
+ string(REGEX REPLACE ".*#define[ \t]+XN_MAJOR_VERSION[ \t]+([0-9]+).*" "\\1" OPENNI_VERSION_MAJOR "${_contents}")
+ string(REGEX REPLACE ".*#define[ \t]+XN_MINOR_VERSION[ \t]+([0-9]+).*" "\\1" OPENNI_VERSION_MINOR "${_contents}")
+ string(REGEX REPLACE ".*#define[ \t]+XN_MAINTENANCE_VERSION[ \t]+([0-9]+).*" "\\1" OPENNI_VERSION_PATCH "${_contents}")
+ string(REGEX REPLACE ".*#define[ \t]+XN_BUILD_VERSION[ \t]+([0-9]+).*" "\\1" OPENNI_VERSION_BUILD "${_contents}")
+ set(OPENNI_VERSION "${OPENNI_VERSION_MAJOR}.${OPENNI_VERSION_MINOR}.${OPENNI_VERSION_PATCH}.${OPENNI_VERSION_BUILD}")
+ endif()
+endif()
+
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(OpenNI
FOUND_VAR OPENNI_FOUND
REQUIRED_VARS OPENNI_LIBRARIES OPENNI_INCLUDE_DIRS
+ VERSION_VAR OPENNI_VERSION
)
if(OPENNI_FOUND)
- message(STATUS "OpenNI found (include: ${OPENNI_INCLUDE_DIRS}, lib: ${OPENNI_LIBRARIES})")
+ message(STATUS "OpenNI found (version: ${OPENNI_VERSION}, include: ${OPENNI_INCLUDE_DIRS}, lib: ${OPENNI_LIBRARIES})")
endif()
# OPENNI2_DEFINITIONS Compiler flags for OpenNI2
find_package(PkgConfig QUIET)
-
-# Find LibUSB
-if(NOT WIN32)
- pkg_check_modules(PC_USB_10 libusb-1.0)
- find_path(USB_10_INCLUDE_DIR libusb-1.0/libusb.h
- HINTS ${PC_USB_10_INCLUDEDIR} ${PC_USB_10_INCLUDE_DIRS} "${USB_10_ROOT}" "$ENV{USB_10_ROOT}"
- PATH_SUFFIXES libusb-1.0)
-
- find_library(USB_10_LIBRARY
- NAMES usb-1.0
- HINTS ${PC_USB_10_LIBDIR} ${PC_USB_10_LIBRARY_DIRS} "${USB_10_ROOT}" "$ENV{USB_10_ROOT}"
- PATH_SUFFIXES lib)
-
- include(FindPackageHandleStandardArgs)
- find_package_handle_standard_args(USB_10 DEFAULT_MSG USB_10_LIBRARY USB_10_INCLUDE_DIR)
-
- if(NOT USB_10_FOUND)
- message(STATUS "OpenNI 2 disabled because libusb-1.0 not found.")
- return()
- else()
- include_directories(SYSTEM ${USB_10_INCLUDE_DIR})
- endif()
-endif()
-
pkg_check_modules(PC_OPENNI2 QUIET libopenni2)
set(OPENNI2_DEFINITIONS ${PC_OPENNI_CFLAGS_OTHER})
# Libraries
if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
- set(OPENNI2_LIBRARIES ${OPENNI2_LIBRARY} ${LIBUSB_1_LIBRARIES})
+ find_package(libusb REQUIRED)
+ set(OPENNI2_LIBRARIES ${OPENNI2_LIBRARY} libusb::libusb)
else()
set(OPENNI2_LIBRARIES ${OPENNI2_LIBRARY})
endif()
endif()
+if(EXISTS "${OPENNI2_INCLUDE_DIR}/OniVersion.h")
+ file(STRINGS "${OPENNI2_INCLUDE_DIR}/OniVersion.h" _contents REGEX "^#define[ \t]+ONI_VERSION_[A-Z]+[ \t]+[0-9]+")
+ if(_contents)
+ string(REGEX REPLACE ".*#define[ \t]+ONI_VERSION_MAJOR[ \t]+([0-9]+).*" "\\1" OPENNI2_VERSION_MAJOR "${_contents}")
+ string(REGEX REPLACE ".*#define[ \t]+ONI_VERSION_MINOR[ \t]+([0-9]+).*" "\\1" OPENNI2_VERSION_MINOR "${_contents}")
+ string(REGEX REPLACE ".*#define[ \t]+ONI_VERSION_MAINTENANCE[ \t]+([0-9]+).*" "\\1" OPENNI2_VERSION_PATCH "${_contents}")
+ string(REGEX REPLACE ".*#define[ \t]+ONI_VERSION_BUILD[ \t]+([0-9]+).*" "\\1" OPENNI2_VERSION_BUILD "${_contents}")
+ set(OPENNI2_VERSION "${OPENNI2_VERSION_MAJOR}.${OPENNI2_VERSION_MINOR}.${OPENNI2_VERSION_PATCH}.${OPENNI2_VERSION_BUILD}")
+ endif()
+endif()
+
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(OpenNI2
FOUND_VAR OPENNI2_FOUND
REQUIRED_VARS OPENNI2_LIBRARIES OPENNI2_INCLUDE_DIRS
+ VERSION_VAR OPENNI2_VERSION
)
if(OPENNI2_FOUND)
- message(STATUS "OpenNI2 found (include: ${OPENNI2_INCLUDE_DIRS}, lib: ${OPENNI2_LIBRARIES})")
+ message(STATUS "OpenNI2 found (version: ${OPENNI2_VERSION}, include: ${OPENNI2_INCLUDE_DIRS}, lib: ${OPENNI2_LIBRARIES})")
endif()
# If QHULL_USE_STATIC is specified then look for static libraries ONLY else
# look for shared ones
-set(QHULL_MAJOR_VERSION 6)
-
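+# Search for the reentrant Qhull libraries (qhull_r variants) instead of the legacy
+# qhull_p / qhull6 names.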
if(QHULL_USE_STATIC)
- set(QHULL_RELEASE_NAME qhullstatic)
- set(QHULL_DEBUG_NAME qhullstatic_d)
+ set(QHULL_RELEASE_NAME qhullstatic_r)
+ set(QHULL_DEBUG_NAME qhullstatic_rd)
else()
- set(QHULL_RELEASE_NAME qhull_p qhull${QHULL_MAJOR_VERSION} qhull)
- set(QHULL_DEBUG_NAME qhull_p_d qhull${QHULL_MAJOR_VERSION}_d qhull_d${QHULL_MAJOR_VERSION} qhull_d)
+ set(QHULL_RELEASE_NAME qhull_r qhull)
+ set(QHULL_DEBUG_NAME qhull_rd qhull_d)
endif()
find_file(QHULL_HEADER
if(QHULL_HEADER)
get_filename_component(qhull_header ${QHULL_HEADER} NAME_WE)
if("${qhull_header}" STREQUAL "qhull")
- set(HAVE_QHULL_2011 OFF)
get_filename_component(QHULL_INCLUDE_DIR ${QHULL_HEADER} PATH)
elseif("${qhull_header}" STREQUAL "libqhull")
- set(HAVE_QHULL_2011 ON)
get_filename_component(QHULL_INCLUDE_DIR ${QHULL_HEADER} PATH)
get_filename_component(QHULL_INCLUDE_DIR ${QHULL_INCLUDE_DIR} PATH)
endif()
)
if(MSVC)
- set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /NODEFAULTLIB:LIBCMTD")
+ string(APPEND CMAKE_SHARED_LINKER_FLAGS " /NODEFAULTLIB:LIBCMTD")
endif()
+++ /dev/null
-# - Try to find libusb-1.0
-# Once done this will define
-#
-# LIBUSB_1_FOUND - system has libusb
-# LIBUSB_1_INCLUDE_DIRS - the libusb include directory
-# LIBUSB_1_LIBRARIES - Link these to use libusb
-# LIBUSB_1_DEFINITIONS - Compiler switches required for using libusb
-#
-# Adapted from cmake-modules Google Code project
-#
-# Copyright (c) 2006 Andreas Schneider <mail@cynapses.org>
-#
-# (Changes for libusb) Copyright (c) 2008 Kyle Machulis <kyle@nonpolynomial.com>
-#
-# Point Cloud Library (PCL) - www.pointclouds.org
-# Copyright (c) 2012, Willow Garage, Inc. (use of FindPackageHandleStandardArgs)
-#
-# Redistribution and use is allowed according to the terms of the New BSD license.
-#
-# CMake-Modules Project New BSD License
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# * Neither the name of the CMake-Modules Project nor the names of its
-# contributors may be used to endorse or promote products derived from this
-# software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-if(LIBUSB_1_LIBRARIES AND LIBUSB_1_INCLUDE_DIRS)
- # in cache already
- set(LIBUSB_FOUND TRUE)
- set(LIBUSB_1_FOUND TRUE)
-else()
- find_path(LIBUSB_1_INCLUDE_DIR
- NAMES libusb-1.0/libusb.h
- PATHS /usr/include /usr/local/include /opt/local/include /sw/include
- PATH_SUFFIXES libusb-1.0)
-
- # We need to look for libusb-1.0 too because find_library does not attempt to find
- # library files with a "lib" prefix implicitly on Windows
- find_library(LIBUSB_1_LIBRARY
- NAMES usb-1.0 libusb-1.0
- PATHS /usr/lib /usr/local/lib /opt/local/lib /sw/lib)
-
- set(LIBUSB_1_INCLUDE_DIRS ${LIBUSB_1_INCLUDE_DIR})
- set(LIBUSB_1_LIBRARIES ${LIBUSB_1_LIBRARY})
-
- include(FindPackageHandleStandardArgs)
- find_package_handle_standard_args(libusb-1.0 DEFAULT_MSG LIBUSB_1_LIBRARY LIBUSB_1_INCLUDE_DIR)
-
- # show the LIBUSB_1_INCLUDE_DIRS and LIBUSB_1_LIBRARIES variables only in the advanced view
- mark_as_advanced(LIBUSB_1_INCLUDE_DIRS LIBUSB_1_LIBRARIES)
-endif()
--- /dev/null
+#.rst:
+# Findlibusb
+# --------
+#
+# Try to find libusb library and headers.
+#
+# IMPORTED Targets
+# ^^^^^^^^^^^^^^^^
+#
+# This module defines the :prop_tgt:`IMPORTED` targets:
+#
+# ``libusb::libusb``
+# Defined if the system has libusb.
+#
+# Result Variables
+# ^^^^^^^^^^^^^^^^
+#
+# This module sets the following variables:
+#
+# ::
+#
+# LIBUSB_FOUND True in case libusb is found, otherwise false
+# LIBUSB_ROOT Path to the root of found libusb installation
+#
+# Example usage
+# ^^^^^^^^^^^^^
+#
+# ::
+#
+# find_package(libusb REQUIRED)
+#
+# add_executable(foo foo.cc)
+# target_link_libraries(foo libusb::libusb)
+#
+
+# Early return if libusb target is already defined. This makes it safe to run
+# this script multiple times.
+if(TARGET libusb::libusb)
+ return()
+endif()
+
+find_package(PkgConfig QUIET)
+if(libusb_FIND_VERSION)
+ pkg_check_modules(PC_libusb libusb-1.0>=${libusb_FIND_VERSION})
+else()
+ pkg_check_modules(PC_libusb libusb-1.0)
+endif()
+
+find_path(libusb_INCLUDE_DIR
+ NAMES
+ libusb-1.0/libusb.h
+ PATHS
+ ${PC_libusb_INCLUDEDIR}
+)
+
+find_library(libusb_LIBRARIES
+ NAMES
+ usb-1.0
+ libusb
+ PATHS
+ ${PC_libusb_LIBRARY_DIRS}
+)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(libusb DEFAULT_MSG libusb_LIBRARIES libusb_INCLUDE_DIR)
+
+mark_as_advanced(libusb_INCLUDE_DIR libusb_LIBRARIES)
+
+if(LIBUSB_FOUND)
+ add_library(libusb::libusb UNKNOWN IMPORTED)
+ set_target_properties(libusb::libusb PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${libusb_INCLUDE_DIR}")
+ set_target_properties(libusb::libusb PROPERTIES IMPORTED_LOCATION "${libusb_LIBRARIES}")
+endif()
COMPONENT OpenNI
)
list(APPEND PCL_3RDPARTY_COMPONENTS OpenNI)
- set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS
- "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}\n ExecWait 'msiexec /i \\\"$INSTDIR\\\\3rdParty\\\\OpenNI\\\\${OPENNI_PACKAGE}\\\" /quiet '")
+ string(APPEND CPACK_NSIS_EXTRA_INSTALL_COMMANDS "\n ExecWait 'msiexec /i \\\"$INSTDIR\\\\3rdParty\\\\OpenNI\\\\${OPENNI_PACKAGE}\\\" /quiet '")
else()
message("WARNING : Could not download ${OPENNI_URL}, error code : ${_error_code}, error message : ${_error_message}")
endif()
COMPONENT OpenNI
)
list(APPEND PCL_3RDPARTY_COMPONENTS OpenNI)
- set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS
- "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}\n ExecWait 'msiexec /i \\\"$INSTDIR\\\\3rdParty\\\\OpenNI\\\\${OPENNI_SENSOR_PACKAGE}\\\" /quiet '")
+ string(APPEND CPACK_NSIS_EXTRA_INSTALL_COMMANDS "\n ExecWait 'msiexec /i \\\"$INSTDIR\\\\3rdParty\\\\OpenNI\\\\${OPENNI_SENSOR_PACKAGE}\\\" /quiet '")
else()
message("WARNING : Could not download ${OPENNI_SENSOR_URL}, error code : ${_error_code}, error message : ${_error_message}")
endif()
list(REMOVE_DUPLICATES PCL_3RDPARTY_COMPONENTS)
- set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}\n noinstall_openni_packages:\n")
+ string(APPEND CPACK_NSIS_EXTRA_INSTALL_COMMANDS "\n noinstall_openni_packages:\n")
endif()
if(WITH_OPENNI2)
COMPONENT OpenNI2
)
list(APPEND PCL_3RDPARTY_COMPONENTS OpenNI2)
- set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS
- "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}\n ExecWait 'msiexec /i \\\"$INSTDIR\\\\3rdParty\\\\OpenNI2\\\\${OPENNI2_PACKAGE}\\\" /quiet '")
+ string(APPEND CPACK_NSIS_EXTRA_INSTALL_COMMANDS "\n ExecWait 'msiexec /i \\\"$INSTDIR\\\\3rdParty\\\\OpenNI2\\\\${OPENNI2_PACKAGE}\\\" /quiet '")
else()
message("WARNING : Could not unzip ${OPENNI2_ZIP}, error code : ${_error_code}, error message : ${_error_message}")
endif()
message("WARNING : Could not download ${OPENNI2_ZIP_URL}, error code : ${_error_code}, error message : ${_error_message}")
endif()
list(REMOVE_DUPLICATES PCL_3RDPARTY_COMPONENTS)
- set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}\n noinstall_openni2_packages:\n")
+ string(APPEND CPACK_NSIS_EXTRA_INSTALL_COMMANDS "\n noinstall_openni2_packages:\n")
endif()
set(CPACK_NSIS_PACKAGE_NAME "${PROJECT_NAME}-${PCL_VERSION_PRETTY}-AllInOne")
endif()
if(MSVC_VERSION EQUAL 1900)
- set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2015-${win_system_name}")
+ string(APPEND CPACK_NSIS_PACKAGE_NAME "-msvc2015-${win_system_name}")
elseif(MSVC_VERSION MATCHES "^191[0-9]$")
- set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2017-${win_system_name}")
+ string(APPEND CPACK_NSIS_PACKAGE_NAME "-msvc2017-${win_system_name}")
elseif(MSVC_VERSION MATCHES "^192[0-9]$")
- set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2019-${win_system_name}")
+ string(APPEND CPACK_NSIS_PACKAGE_NAME "-msvc2019-${win_system_name}")
else()
- set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-${win_system_name}")
+ string(APPEND CPACK_NSIS_PACKAGE_NAME "-${win_system_name}")
endif()
set(CPACK_PACKAGE_FILE_NAME ${CPACK_NSIS_PACKAGE_NAME})
# force CPACK_PACKAGE_INSTALL_REGISTRY_KEY because of a known limitation in cmake/cpack to be fixed in next releases
# add documentation
if(WITH_DOCS)
- set(CPACK_COMPONENTS_ALL "${CPACK_COMPONENTS_ALL} doc")
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_DOC_GROUP \"PCL\")\n")
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_DOC_DISPLAY_NAME \"Documentation\")\n")
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_DOC_DESCRIPTION \"API documentation and tutorials\")\n")
+ string(APPEND CPACK_COMPONENTS_ALL " doc")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_DOC_GROUP \"PCL\")\n")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_DOC_DISPLAY_NAME \"Documentation\")\n")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_DOC_DESCRIPTION \"API documentation and tutorials\")\n")
endif()
# add PCLConfig
- set(CPACK_COMPONENTS_ALL "${CPACK_COMPONENTS_ALL} pclconfig")
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_PCLCONFIG_GROUP \"PCL\")\n")
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_PCLCONFIG_DISPLAY_NAME \"PCLConfig\")\n")
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_PCLCONFIG_DESCRIPTION \"Helper cmake configuration scripts used by find_package(PCL)\")\n")
+ string(APPEND CPACK_COMPONENTS_ALL " pclconfig")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_PCLCONFIG_GROUP \"PCL\")\n")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_PCLCONFIG_DISPLAY_NAME \"PCLConfig\")\n")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_PCLCONFIG_DESCRIPTION \"Helper cmake configuration scripts used by find_package(PCL)\")\n")
# add 3rdParty libs
if(BUILD_all_in_one_installer)
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_GROUP_THIRDPARTY_DISPLAY_NAME \"3rd Party Libraries\")")
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_GROUP_THIRDPARTY_DESCRIPTION \"3rd Party Libraries\")")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_GROUP_THIRDPARTY_DISPLAY_NAME \"3rd Party Libraries\")")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_GROUP_THIRDPARTY_DESCRIPTION \"3rd Party Libraries\")")
foreach(dep ${PCL_3RDPARTY_COMPONENTS})
string(TOUPPER ${dep} DEP)
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_${DEP}_GROUP \"ThirdParty\")")
- set(CPACK_COMPONENTS_ALL "${CPACK_COMPONENTS_ALL} ${dep}")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENT_${DEP}_GROUP \"ThirdParty\")")
+ string(APPEND CPACK_COMPONENTS_ALL " ${dep}")
endforeach()
endif()
- set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENTS_ALL${CPACK_COMPONENTS_ALL})\n")
+ string(APPEND PCL_CPACK_COMPONENTS "\nset(CPACK_COMPONENTS_ALL${CPACK_COMPONENTS_ALL})\n")
configure_file(${_cpack_cfg_in} ${PCL_CPACK_CFG_FILE} @ONLY)
endmacro()
foreach(_ss ${PCL_CPACK_SUBSYSTEMS})
PCL_GET_SUBSYS_STATUS(_status ${_ss})
if(_status)
- set(_comps_list "${_comps_list} pcl_${_ss}")
+ string(APPEND _comps_list " pcl_${_ss}")
PCL_CPACK_ADD_COMP_INFO(${_var} ${_ss})
endif()
endforeach()
set(_deps_str)
GET_IN_MAP(_deps PCL_SUBSYS_DEPS ${_ss})
foreach(_dep ${_deps})
- set(_deps_str "${_deps_str} pcl_${_dep}")
+ string(APPEND _deps_str " pcl_${_dep}")
endforeach()
set(${_var}
"${${_var}}set(CPACK_COMPONENT_PCL_${_comp_name}_DEPENDS ${_deps_str})\n")
--- /dev/null
+###############################################################################
+# Check for the presence of AVX and figure out the flags to use for it.
+function(PCL_CHECK_FOR_AVX)
+ set(AVX_FLAGS)
+
+ include(CheckCXXSourceRuns)
+
+ check_cxx_source_runs("
+ #include <immintrin.h>
+ int main()
+ {
+ __m256i a = {0};
+ a = _mm256_abs_epi16(a);
+ return 0;
+ }"
+ HAVE_AVX2)
+
+ if(NOT HAVE_AVX2)
+ check_cxx_source_runs("
+ #include <immintrin.h>
+ int main()
+ {
+ __m256 a;
+ a = _mm256_set1_ps(0);
+ return 0;
+ }"
+ HAVE_AVX)
+ endif()
+
+  # On MSVC, setting /arch defines __AVX__/__AVX2__, see https://docs.microsoft.com/en-us/cpp/build/reference/arch-x64?view=msvc-160
+  # AVX2 extends and includes AVX.
+  # Setting the flag lets the compiler emit AVX instructions and also enables code guarded by these defines.
+  # TODO: Add an AVX512 variant if needed.
+ if(MSVC)
+ if(HAVE_AVX2)
+ set(AVX_FLAGS "/arch:AVX2" PARENT_SCOPE)
+ elseif(HAVE_AVX)
+ set(AVX_FLAGS "/arch:AVX" PARENT_SCOPE)
+ endif()
+ endif()
+endfunction()
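+
+# A minimal usage sketch (illustrative only; PCL_ENABLE_AVX is the option defined in the
+# top-level CMakeLists, while "my_target" is a placeholder name):
+#
+#   if(PCL_ENABLE_AVX)
+#     PCL_CHECK_FOR_AVX()                 # sets AVX_FLAGS in the caller's scope (MSVC only)
+#     if(AVX_FLAGS)
+#       target_compile_options(my_target PRIVATE ${AVX_FLAGS})
+#     endif()
+#   endif()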
set(Boost_USE_STATIC_LIBS OFF)
set(Boost_USE_STATIC OFF)
set(Boost_USE_MULTITHREAD ON)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBOOST_ALL_DYN_LINK -DBOOST_ALL_NO_LIB")
+ string(APPEND CMAKE_CXX_FLAGS " -DBOOST_ALL_DYN_LINK -DBOOST_ALL_NO_LIB")
else()
if(NOT PCL_SHARED_LIBS OR WIN32)
set(Boost_USE_STATIC_LIBS ON)
endif()
set(Boost_ADDITIONAL_VERSIONS
- "1.73.0" "1.73" "1.72.0" "1.72" "1.71.0" "1.71" "1.70.0" "1.70"
- "1.69.0" "1.69" "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65"
- "1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60"
- "1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55")
+ "1.76.0" "1.76" "1.75.0" "1.75"
+ "1.74.0" "1.74" "1.73.0" "1.73" "1.72.0" "1.72" "1.71.0" "1.71" "1.70.0" "1.70"
+ "1.69.0" "1.69" "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65")
# Disable the config mode of find_package(Boost)
set(Boost_NO_BOOST_CMAKE ON)
# Optional boost modules
-find_package(Boost 1.55.0 QUIET COMPONENTS serialization mpi)
+find_package(Boost 1.65.0 QUIET COMPONENTS serialization mpi)
if(Boost_SERIALIZATION_FOUND)
set(BOOST_SERIALIZATION_FOUND TRUE)
endif()
# Required boost modules
set(BOOST_REQUIRED_MODULES filesystem date_time iostreams system)
-find_package(Boost 1.55.0 REQUIRED COMPONENTS ${BOOST_REQUIRED_MODULES})
+find_package(Boost 1.65.0 REQUIRED COMPONENTS ${BOOST_REQUIRED_MODULES})
if(Boost_FOUND)
set(BOOST_FOUND TRUE)
# https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
if(NOT ${CUDA_VERSION_STRING} VERSION_LESS "11.0")
- set(__cuda_arch_bin "5.2 5.3 6.0 6.1 7.0 7.2 7.5")
+ set(__cuda_arch_bin "3.5 3.7 5.0 5.2 5.3 6.0 6.1 6.2 7.0 7.2 7.5 8.0 8.6")
elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "10.0")
- set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0 7.2 7.5")
+ set(__cuda_arch_bin "3.0 3.2 3.5 3.7 5.0 5.2 5.3 6.0 6.1 6.2 7.0 7.2 7.5")
elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "9.0")
- set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0 7.2")
+ set(__cuda_arch_bin "3.0 3.2 3.5 3.7 5.0 5.2 5.3 6.0 6.1 6.2 7.0 7.2")
endif()
set(CUDA_ARCH_BIN ${__cuda_arch_bin} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
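+# CUDA_ARCH_BIN is a cache variable, so the architecture list can be narrowed at configure
+# time if desired, e.g. (illustrative): cmake -DCUDA_ARCH_BIN="7.5 8.6" ..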
###############################################################################
# Check for the presence of SSE and figure out the flags to use for it.
-macro(PCL_CHECK_FOR_SSE)
- set(SSE_FLAGS)
- set(SSE_DEFINITIONS)
-
- if(NOT CMAKE_CROSSCOMPILING)
- # Test GCC/G++ and CLANG
- if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
- include(CheckCXXCompilerFlag)
- check_cxx_compiler_flag("-march=native" HAVE_MARCH)
- if(HAVE_MARCH)
- list(APPEND SSE_FLAGS "-march=native")
- else()
- list(APPEND SSE_FLAGS "-mtune=native")
- endif()
- message(STATUS "Using CPU native flags for SSE optimization: ${SSE_FLAGS}")
- endif()
- endif()
-
- # Unfortunately we need to check for SSE to enable "-mfpmath=sse" alongside
- # "-march=native". The reason for this is that by default, 32bit architectures
- # tend to use the x87 FPU (which has 80 bit internal precision), thus leading
- # to different results than 64bit architectures which are using SSE2 (64 bit internal
- # precision). One solution would be to use "-ffloat-store" on 32bit (see
- # http://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html), but that slows code down,
- # so the preferred solution is to try "-mpfmath=sse" first.
- include(CheckCXXSourceRuns)
- set(CMAKE_REQUIRED_FLAGS)
+function(PCL_CHECK_FOR_SSE)
+ set(SSE_FLAGS)
+ set(SSE_DEFINITIONS)
- check_cxx_source_runs("
- // Intel compiler defines an incompatible _mm_malloc signature
- #if defined(__INTEL_COMPILER)
- #include <malloc.h>
- #else
- #include <mm_malloc.h>
- #endif
- int main()
- {
- void* mem = _mm_malloc (100, 16);
- return 0;
- }"
- HAVE_MM_MALLOC)
+ if(NOT CMAKE_CROSSCOMPILING)
+ # Test GCC/G++ and CLANG
+ if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
+ include(CheckCXXCompilerFlag)
+ check_cxx_compiler_flag("-march=native" HAVE_MARCH)
+ if(HAVE_MARCH)
+ list(APPEND SSE_FLAGS "-march=native")
+ else()
+ list(APPEND SSE_FLAGS "-mtune=native")
+ endif()
+ message(STATUS "Using CPU native flags for SSE optimization: ${SSE_FLAGS}")
+ endif()
+ endif()
- check_cxx_source_runs("
- #include <stdlib.h>
- int main()
- {
- void* mem;
- return posix_memalign (&mem, 16, 100);
- }"
- HAVE_POSIX_MEMALIGN)
+ # Unfortunately we need to check for SSE to enable "-mfpmath=sse" alongside
+ # "-march=native". The reason for this is that by default, 32bit architectures
+ # tend to use the x87 FPU (which has 80 bit internal precision), thus leading
+ # to different results than 64bit architectures which are using SSE2 (64 bit internal
+ # precision). One solution would be to use "-ffloat-store" on 32bit (see
+ # http://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html), but that slows code down,
+  # so the preferred solution is to try "-mfpmath=sse" first.
+ include(CheckCXXSourceRuns)
+ set(CMAKE_REQUIRED_FLAGS)
+ set(SSE_LEVEL 0)
+
+ check_cxx_source_runs("
+ // Intel compiler defines an incompatible _mm_malloc signature
+ #if defined(__INTEL_COMPILER)
+ #include <malloc.h>
+ #else
+ #include <mm_malloc.h>
+ #endif
+ int main()
+ {
+ void* mem = _mm_malloc (100, 16);
+ return 0;
+ }"
+ HAVE_MM_MALLOC)
+
+ check_cxx_source_runs("
+ #include <stdlib.h>
+ int main()
+ {
+ void* mem;
+ return posix_memalign (&mem, 16, 100);
+ }"
+ HAVE_POSIX_MEMALIGN)
+ if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
+ set(CMAKE_REQUIRED_FLAGS "-msse4.2")
+ endif()
+
+ check_cxx_source_runs("
+ #include <emmintrin.h>
+ #include <nmmintrin.h>
+ int main ()
+ {
+ long long a[2] = { 1, 2 };
+ long long b[2] = { -1, 3 };
+ long long c[2];
+ __m128i va = _mm_loadu_si128 ((__m128i*)a);
+ __m128i vb = _mm_loadu_si128 ((__m128i*)b);
+ __m128i vc = _mm_cmpgt_epi64 (va, vb);
+
+ _mm_storeu_si128 ((__m128i*)c, vc);
+ if (c[0] == -1LL && c[1] == 0LL)
+ return (0);
+ else
+ return (1);
+ }"
+ HAVE_SSE4_2_EXTENSIONS)
+
+ if(HAVE_SSE4_2_EXTENSIONS)
+ set(SSE_LEVEL 4.2)
+ endif()
+
+ if(SSE_LEVEL LESS 4.2)
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
- set(CMAKE_REQUIRED_FLAGS "-msse4.2")
+ set(CMAKE_REQUIRED_FLAGS "-msse4.1")
endif()
check_cxx_source_runs("
- #include <emmintrin.h>
- #include <nmmintrin.h>
- int main ()
- {
- long long a[2] = { 1, 2 };
- long long b[2] = { -1, 3 };
- long long c[2];
- __m128i va = _mm_loadu_si128 ((__m128i*)a);
- __m128i vb = _mm_loadu_si128 ((__m128i*)b);
- __m128i vc = _mm_cmpgt_epi64 (va, vb);
-
- _mm_storeu_si128 ((__m128i*)c, vc);
- if (c[0] == -1LL && c[1] == 0LL)
- return (0);
- else
- return (1);
- }"
- HAVE_SSE4_2_EXTENSIONS)
+ #include <smmintrin.h>
+ int main ()
+ {
+ __m128 a, b;
+ float vals[4] = {1, 2, 3, 4};
+ const int mask = 123;
+ a = _mm_loadu_ps (vals);
+ b = a;
+ b = _mm_dp_ps (a, a, mask);
+ _mm_storeu_ps (vals,b);
+ return (0);
+ }"
+ HAVE_SSE4_1_EXTENSIONS)
+
+ if(HAVE_SSE4_1_EXTENSIONS)
+ set(SSE_LEVEL 4.1)
+ endif()
+ endif()
+ if(SSE_LEVEL LESS 4.1)
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
- set(CMAKE_REQUIRED_FLAGS "-msse4.1")
+ set(CMAKE_REQUIRED_FLAGS "-mssse3")
endif()
-
+
check_cxx_source_runs("
- #include <smmintrin.h>
- int main ()
- {
- __m128 a, b;
- float vals[4] = {1, 2, 3, 4};
- const int mask = 123;
- a = _mm_loadu_ps (vals);
- b = a;
- b = _mm_dp_ps (a, a, mask);
- _mm_storeu_ps (vals,b);
- return (0);
- }"
- HAVE_SSE4_1_EXTENSIONS)
+ #include <tmmintrin.h>
+ int main ()
+ {
+ __m128i a, b;
+ int vals[4] = {-1, -2, -3, -4};
+ a = _mm_loadu_si128 ((const __m128i*)vals);
+ b = _mm_abs_epi32 (a);
+ _mm_storeu_si128 ((__m128i*)vals, b);
+ return (0);
+ }"
+ HAVE_SSSE3_EXTENSIONS)
+ if(HAVE_SSSE3_EXTENSIONS)
+ set(SSE_LEVEL 3.1)
+ endif()
+ endif()
+
+ if(SSE_LEVEL LESS 3.1)
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
- set(CMAKE_REQUIRED_FLAGS "-mssse3")
+ set(CMAKE_REQUIRED_FLAGS "-msse3")
endif()
check_cxx_source_runs("
- #include <tmmintrin.h>
- int main ()
- {
- __m128i a, b;
- int vals[4] = {-1, -2, -3, -4};
- a = _mm_loadu_si128 ((const __m128i*)vals);
- b = _mm_abs_epi32 (a);
- _mm_storeu_si128 ((__m128i*)vals, b);
+ #include <pmmintrin.h>
+ int main ()
+ {
+ __m128d a, b;
+ double vals[2] = {0};
+ a = _mm_loadu_pd (vals);
+ b = _mm_hadd_pd (a,a);
+ _mm_storeu_pd (vals, b);
return (0);
- }"
- HAVE_SSSE3_EXTENSIONS)
+ }"
+ HAVE_SSE3_EXTENSIONS)
- if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
- set(CMAKE_REQUIRED_FLAGS "-msse3")
+ if(HAVE_SSE3_EXTENSIONS)
+      set(SSE_LEVEL 3.0)
endif()
+ endif()
- check_cxx_source_runs("
- #include <pmmintrin.h>
- int main ()
- {
- __m128d a, b;
- double vals[2] = {0};
- a = _mm_loadu_pd (vals);
- b = _mm_hadd_pd (a,a);
- _mm_storeu_pd (vals, b);
- return (0);
- }"
- HAVE_SSE3_EXTENSIONS)
-
+ if(SSE_LEVEL LESS 3.0)
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
- set(CMAKE_REQUIRED_FLAGS "-msse2")
+ set(CMAKE_REQUIRED_FLAGS "-msse2")
elseif(MSVC AND NOT CMAKE_CL_64)
- set(CMAKE_REQUIRED_FLAGS "/arch:SSE2")
+ set(CMAKE_REQUIRED_FLAGS "/arch:SSE2")
endif()
check_cxx_source_runs("
- #include <emmintrin.h>
- int main ()
- {
- __m128d a, b;
- double vals[2] = {0};
- a = _mm_loadu_pd (vals);
- b = _mm_add_pd (a,a);
- _mm_storeu_pd (vals,b);
- return (0);
- }"
- HAVE_SSE2_EXTENSIONS)
+ #include <emmintrin.h>
+ int main ()
+ {
+ __m128d a, b;
+ double vals[2] = {0};
+ a = _mm_loadu_pd (vals);
+ b = _mm_add_pd (a,a);
+ _mm_storeu_pd (vals,b);
+ return (0);
+ }"
+ HAVE_SSE2_EXTENSIONS)
+
+ if(HAVE_SSE2_EXTENSIONS)
+ set(SSE_LEVEL 2.0)
+ endif()
+ endif()
+ if(SSE_LEVEL LESS 2.0)
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
- set(CMAKE_REQUIRED_FLAGS "-msse")
+ set(CMAKE_REQUIRED_FLAGS "-msse")
elseif(MSVC AND NOT CMAKE_CL_64)
- set(CMAKE_REQUIRED_FLAGS "/arch:SSE")
+ set(CMAKE_REQUIRED_FLAGS "/arch:SSE")
endif()
check_cxx_source_runs("
- #include <xmmintrin.h>
- int main ()
- {
- __m128 a, b;
- float vals[4] = {0};
- a = _mm_loadu_ps (vals);
- b = a;
- b = _mm_add_ps (a,b);
- _mm_storeu_ps (vals,b);
- return (0);
- }"
- HAVE_SSE_EXTENSIONS)
-
- set(CMAKE_REQUIRED_FLAGS)
+ #include <xmmintrin.h>
+ int main ()
+ {
+ __m128 a, b;
+ float vals[4] = {0};
+ a = _mm_loadu_ps (vals);
+ b = a;
+ b = _mm_add_ps (a,b);
+ _mm_storeu_ps (vals,b);
+ return (0);
+ }"
+ HAVE_SSE_EXTENSIONS)
- if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
- if(HAVE_SSE4_2_EXTENSIONS)
- list(APPEND SSE_FLAGS "-msse4.2" "-mfpmath=sse")
- elseif(HAVE_SSE4_1_EXTENSIONS)
- list(APPEND SSE_FLAGS "-msse4.1" "-mfpmath=sse")
- elseif(HAVE_SSSE3_EXTENSIONS)
- list(APPEND SSE_FLAGS "-mssse3" "-mfpmath=sse")
- elseif(HAVE_SSE3_EXTENSIONS)
- list(APPEND SSE_FLAGS "-msse3" "-mfpmath=sse")
- elseif(HAVE_SSE2_EXTENSIONS)
- list(APPEND SSE_FLAGS "-msse2" "-mfpmath=sse")
- elseif(HAVE_SSE_EXTENSIONS)
- list(APPEND SSE_FLAGS "-msse" "-mfpmath=sse")
- else()
- # Setting -ffloat-store to alleviate 32bit vs 64bit discrepancies on non-SSE
- # platforms.
- list(APPEND SSE_FLAGS "-ffloat-store")
- endif()
- elseif(MSVC AND NOT CMAKE_CL_64)
- if(HAVE_SSE2_EXTENSIONS)
- list(APPEND SSE_FLAGS "/arch:SSE2")
- elseif(HAVE_SSE_EXTENSIONS)
- list(APPEND SSE_FLAGS "/arch:SSE")
- endif()
- endif()
-
- if(MSVC)
- if(HAVE_SSSE3_EXTENSIONS)
- set(SSE_DEFINITIONS "${SSE_DEFINITIONS} -D__SSSE3__")
- endif()
- if(HAVE_SSE2_EXTENSIONS)
- set(SSE_DEFINITIONS "${SSE_DEFINITIONS} -D__SSE2__")
- endif()
- if(HAVE_SSE_EXTENSIONS)
- set(SSE_DEFINITIONS "${SSE_DEFINITIONS} -D__SSE__")
- endif()
- endif()
- string(REPLACE ";" " " SSE_FLAGS_STR "${SSE_FLAGS}")
-endmacro()
+ if(HAVE_SSE_EXTENSIONS)
+ set(SSE_LEVEL 1.0)
+ endif()
+ endif()
+
+ if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
+ if(SSE_LEVEL GREATER_EQUAL 1.0)
+ if(SSE_LEVEL GREATER_EQUAL 4.2)
+ set(SSE_FLAGS "-msse4.2")
+ elseif(SSE_LEVEL GREATER_EQUAL 4.1)
+ set(SSE_FLAGS "-msse4.1")
+ elseif(SSE_LEVEL GREATER_EQUAL 3.1)
+        set(SSE_FLAGS "-mssse3")
+ elseif(SSE_LEVEL GREATER_EQUAL 3.0)
+ set(SSE_FLAGS "-msse3")
+ elseif(SSE_LEVEL GREATER_EQUAL 2.0)
+ set(SSE_FLAGS "-msse2")
+ else()
+ set(SSE_FLAGS "-msse")
+ endif()
+ string(APPEND SSE_FLAGS " -mfpmath=sse")
+ else()
+ # Setting -ffloat-store to alleviate 32bit vs 64bit discrepancies on non-SSE
+ # platforms.
+ list(APPEND SSE_FLAGS "-ffloat-store")
+ endif()
+  elseif(MSVC AND CMAKE_SIZEOF_VOID_P EQUAL 4)
+ if(SSE_LEVEL GREATER_EQUAL 2.0)
+      set(SSE_FLAGS "/arch:SSE2")
+    elseif(SSE_LEVEL GREATER_EQUAL 1.0)
+      set(SSE_FLAGS "/arch:SSE")
+ endif()
+ elseif(MSVC)
+ if(SSE_LEVEL GREATER_EQUAL 4.2)
+ string(APPEND SSE_DEFINITIONS " -D__SSE4_2__")
+ endif()
+ if(SSE_LEVEL GREATER_EQUAL 4.1)
+ string(APPEND SSE_DEFINITIONS " -D__SSE4_1__")
+ endif()
+ if(SSE_LEVEL GREATER_EQUAL 3.1)
+ string(APPEND SSE_DEFINITIONS " -D__SSSE3__")
+ endif()
+ if(SSE_LEVEL GREATER_EQUAL 3.0)
+ string(APPEND SSE_DEFINITIONS " -D__SSE3__")
+ endif()
+ if(SSE_LEVEL GREATER_EQUAL 2.0)
+ string(APPEND SSE_DEFINITIONS " -D__SSE2__")
+ endif()
+ if(SSE_LEVEL GREATER_EQUAL 1.0)
+ string(APPEND SSE_DEFINITIONS " -D__SSE__")
+ endif()
+ endif()
+
+ set(SSE_FLAGS ${SSE_FLAGS} PARENT_SCOPE)
+ set(SSE_DEFINITIONS ${SSE_DEFINITIONS} PARENT_SCOPE)
+
+ unset(CMAKE_REQUIRED_FLAGS)
+endfunction()
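+
+# A minimal usage sketch (illustrative only; "my_target" is a placeholder name):
+#
+#   PCL_CHECK_FOR_SSE()   # sets SSE_FLAGS and SSE_DEFINITIONS in the caller's scope
+#   separate_arguments(_sse_flags UNIX_COMMAND "${SSE_FLAGS}")
+#   target_compile_options(my_target PRIVATE ${_sse_flags})
+#   # On 64-bit MSVC builds, SSE_DEFINITIONS carries "-D__SSE2__"-style definitions for guarded code.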
--- /dev/null
+function(checkVTKComponents)
+ cmake_parse_arguments(PARAM "" "MISSING_COMPONENTS" "COMPONENTS" ${ARGN})
+
+ set(vtkMissingComponents)
+
+ foreach(vtkComponent ${PARAM_COMPONENTS})
+ if (VTK_VERSION VERSION_LESS 9.0)
+ if (NOT TARGET ${vtkComponent})
+ list(APPEND vtkMissingComponents ${vtkComponent})
+ endif()
+ else()
+ if (NOT TARGET VTK::${vtkComponent})
+ list(APPEND vtkMissingComponents ${vtkComponent})
+ endif()
+ endif()
+ endforeach()
+
+ set(${PARAM_MISSING_COMPONENTS} ${vtkMissingComponents} PARENT_SCOPE)
+endfunction()
+
+# Start with a generic call to find any supported VTK version, so that we can retrieve
+# the VTK version. Because the module names changed between VTK 8.2 and 9.0, we don't
+# search explicitly for modules here. We also don't pass the required minimum version 6.2
+# to find_package, because it would then only accept versions with the same major version.
+find_package(VTK)
+
+if(NOT VTK_FOUND)
+ return()
+endif()
+
+if(VTK_FOUND AND (VTK_VERSION VERSION_LESS 6.2))
+ message(WARNING "The minimum required version of VTK is 6.2, but found ${VTK_VERSION}")
+ set(VTK_FOUND FALSE)
+ return()
+endif()
+
+set(NON_PREFIX_PCL_VTK_COMPONENTS
+ ChartsCore
+ CommonColor
+ CommonComputationalGeometry
+ CommonCore
+ CommonDataModel
+ CommonExecutionModel
+ CommonMath
+ CommonMisc
+ CommonTransforms
+ FiltersCore
+ FiltersExtraction
+ FiltersGeneral
+ FiltersGeometry
+ FiltersModeling
+ FiltersSources
+ ImagingCore
+ ImagingSources
+ InteractionStyle
+ InteractionWidgets
+ IOCore
+ IOGeometry
+ IOImage
+ IOLegacy
+ IOPLY
+ RenderingAnnotation
+ RenderingCore
+ RenderingContext2D
+ RenderingLOD
+ RenderingFreeType
+ ViewsCore
+ ViewsContext2D
+)
+
+# If the VTK version is 6, use the OpenGL backend
+if(VTK_VERSION VERSION_LESS 7.0)
+ set(VTK_RENDERING_BACKEND "OpenGL")
+ set(VTK_RENDERING_BACKEND_OPENGL_VERSION "1")
+  message(DEPRECATION "The rendering backend OpenGL is deprecated and no longer available since VTK 8.2."
+    " Please switch to the OpenGL2 backend instead, which is available since VTK 6.2."
+    " Support for the deprecated backend will be dropped with PCL 1.13.")
+
+# If the VTK version is 7, 8 or 9, use the OpenGL2 backend
+else()
+ set(VTK_RENDERING_BACKEND "OpenGL2")
+ set(VTK_RENDERING_BACKEND_OPENGL_VERSION "2")
+endif()
+
+list(APPEND NON_PREFIX_PCL_VTK_COMPONENTS Rendering${VTK_RENDERING_BACKEND})
+
+# Prefix the component names with "vtk" if the version is < 9.0
+if(VTK_VERSION VERSION_LESS 9.0)
+ foreach(vtkComponent ${NON_PREFIX_PCL_VTK_COMPONENTS})
+ set(vtkComponent "vtk${vtkComponent}")
+ list(APPEND PCL_VTK_COMPONENTS ${vtkComponent})
+ endforeach()
+else()
+ set(PCL_VTK_COMPONENTS ${NON_PREFIX_PCL_VTK_COMPONENTS})
+endif()
+
+# Check if requested modules are available
+checkVTKComponents(COMPONENTS ${PCL_VTK_COMPONENTS} MISSING_COMPONENTS vtkMissingComponents)
+
+if (vtkMissingComponents)
+ set(VTK_FOUND FALSE)
+ message(WARNING "Missing vtk modules: ${vtkMissingComponents}")
+endif()
+
+if("vtkGUISupportQt" IN_LIST VTK_MODULES_ENABLED AND "vtkRenderingQt" IN_LIST VTK_MODULES_ENABLED)
+ set(HAVE_QVTK TRUE)
+ #PCL_VTK_COMPONENTS is used in the PCLConfig.cmake to refind the required modules.
+ #Pre vtk 9.0, all vtk libraries are linked into pcl_visualizer.
+ #Subprojects can link against pcl_visualizer and directly use VTK-QT libraries.
+ list(APPEND PCL_VTK_COMPONENTS vtkRenderingQt vtkGUISupportQt)
+elseif("GUISupportQt" IN_LIST VTK_AVAILABLE_COMPONENTS AND "RenderingQt" IN_LIST VTK_AVAILABLE_COMPONENTS)
+ set(HAVE_QVTK TRUE)
+ #PCL_VTK_COMPONENTS is used in the PCLConfig.cmake to refind the required modules.
+ #Post vtk 9.0, only required libraries are linked against pcl_visualizer.
+ #Subprojects need to manually link to VTK-QT libraries.
+ list(APPEND PCL_VTK_COMPONENTS RenderingQt GUISupportQt)
+else()
+ unset(HAVE_QVTK)
+endif()
+
+if(PCL_SHARED_LIBS OR (NOT (PCL_SHARED_LIBS) AND NOT (VTK_BUILD_SHARED_LIBS)))
+ if(VTK_VERSION VERSION_LESS 9.0)
+ if(VTK_USE_FILE)
+ include(${VTK_USE_FILE})
+ endif()
+ endif()
+
+ if(APPLE)
+ option(VTK_USE_COCOA "Use Cocoa for VTK render windows" ON)
+ mark_as_advanced(VTK_USE_COCOA)
+ endif()
+else()
+ set(VTK_FOUND OFF)
+ message("Warning: You are to build PCL in STATIC but VTK is SHARED!")
+ message("Warning: VTK disabled!")
+endif()
+
+message(STATUS "VTK version: ${VTK_VERSION}")
+message(STATUS "VTK rendering backend: ${VTK_RENDERING_BACKEND}")
+
+if(HAVE_QVTK)
+ message(STATUS "VTK Qt support: YES")
+else()
+ message(STATUS "VTK Qt support: NOTFOUND")
+endif()
+
+if(VTK_INCLUDE_DIRS)
+ message(STATUS "VTK include: ${VTK_INCLUDE_DIRS}")
+endif()
+
+if(VTK_LIBRARIES)
+ message(STATUS "VTK libs: ${VTK_LIBRARIES}")
+endif()
option(PCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32 "Build against a dynamically linked Boost on Win32 platforms." OFF)
mark_as_advanced(PCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32)
-# Build with dynamic linking for FLANN (advanced users)
-option(PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32 "Build against a dynamically linked FLANN on Win32 platforms." OFF)
+# Build with shared/static linking for FLANN (advanced users)
+set(PCL_FLANN_REQUIRED_TYPE "DONTCARE" CACHE STRING "Select the FLANN linkage to require (SHARED/STATIC/DONTCARE).")
+set_property(CACHE PCL_FLANN_REQUIRED_TYPE PROPERTY STRINGS DONTCARE SHARED STATIC)
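+# The desired FLANN linkage can be selected at configure time, e.g. (illustrative):
+#   cmake -DPCL_FLANN_REQUIRED_TYPE=SHARED ..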
+
mark_as_advanced(PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32)
# Build with dynamic linking for QHull (advanced users)
option(PCL_ENABLE_SSE "Enable or Disable SSE optimizations." ON)
mark_as_advanced(PCL_ENABLE_SSE)
+if(WIN32)
+ # Enable or Disable the check for AVX optimizations
+ option(PCL_ENABLE_AVX "Enable or Disable AVX optimizations." ON)
+ mark_as_advanced(PCL_ENABLE_AVX)
+endif()
+
# Allow the user to enable compiler cache
option(PCL_ENABLE_CCACHE "Enable using compiler cache for compilation" OFF)
mark_as_advanced(PCL_ENABLE_CCACHE)
# (Used to prevent gpu tests from executing in CI where GPU hardware is unavailable)
option(PCL_DISABLE_GPU_TESTS "Disable running GPU tests. If disabled, tests will still be built." OFF)
+# Set whether visualization tests should be run
+# (Used to prevent visualization tests from executing in CI where visualization is unavailable)
+option(PCL_DISABLE_VISUALIZATION_TESTS "Disable running visualization tests. If disabled, tests will still be built." OFF)
string(REGEX MATCH "^tests_" _is_test ${_ss})
if(_status AND NOT _is_test)
- set(PCLCONFIG_AVAILABLE_COMPONENTS "${PCLCONFIG_AVAILABLE_COMPONENTS} ${_ss}")
- set(PCLCONFIG_AVAILABLE_COMPONENTS_LIST "${PCLCONFIG_AVAILABLE_COMPONENTS_LIST}\n# - ${_ss}")
+ string(APPEND PCLCONFIG_AVAILABLE_COMPONENTS " ${_ss}")
+ string(APPEND PCLCONFIG_AVAILABLE_COMPONENTS_LIST "\n# - ${_ss}")
GET_IN_MAP(_deps PCL_SUBSYS_DEPS ${_ss})
if(_deps)
- set(PCLCONFIG_INTERNAL_DEPENDENCIES "${PCLCONFIG_INTERNAL_DEPENDENCIES}set(pcl_${_ss}_int_dep ")
+ string(APPEND PCLCONFIG_INTERNAL_DEPENDENCIES "set(pcl_${_ss}_int_dep ")
foreach(_dep ${_deps})
- set(PCLCONFIG_INTERNAL_DEPENDENCIES "${PCLCONFIG_INTERNAL_DEPENDENCIES}${_dep} ")
+ string(APPEND PCLCONFIG_INTERNAL_DEPENDENCIES "${_dep} ")
endforeach()
- set(PCLCONFIG_INTERNAL_DEPENDENCIES "${PCLCONFIG_INTERNAL_DEPENDENCIES})\n")
+ string(APPEND PCLCONFIG_INTERNAL_DEPENDENCIES ")\n")
endif()
GET_IN_MAP(_ext_deps PCL_SUBSYS_EXT_DEPS ${_ss})
if(_ext_deps)
- set(PCLCONFIG_EXTERNAL_DEPENDENCIES "${PCLCONFIG_EXTERNAL_DEPENDENCIES}set(pcl_${_ss}_ext_dep ")
+ string(APPEND PCLCONFIG_EXTERNAL_DEPENDENCIES "set(pcl_${_ss}_ext_dep ")
foreach(_ext_dep ${_ext_deps})
- set(PCLCONFIG_EXTERNAL_DEPENDENCIES "${PCLCONFIG_EXTERNAL_DEPENDENCIES}${_ext_dep} ")
+ string(APPEND PCLCONFIG_EXTERNAL_DEPENDENCIES "${_ext_dep} ")
endforeach()
- set(PCLCONFIG_EXTERNAL_DEPENDENCIES "${PCLCONFIG_EXTERNAL_DEPENDENCIES})\n")
+ string(APPEND PCLCONFIG_EXTERNAL_DEPENDENCIES ")\n")
endif()
GET_IN_MAP(_opt_deps PCL_SUBSYS_OPT_DEPS ${_ss})
if(_opt_deps)
- set(PCLCONFIG_OPTIONAL_DEPENDENCIES "${PCLCONFIG_OPTIONAL_DEPENDENCIES}set(pcl_${_ss}_opt_dep ")
+ string(APPEND PCLCONFIG_OPTIONAL_DEPENDENCIES "set(pcl_${_ss}_opt_dep ")
foreach(_opt_dep ${_opt_deps})
string(TOUPPER "WITH_${_opt_dep}" _tmp)
string(REGEX REPLACE "-(.*)" "" _condition ${_tmp}) #libusb-1.0 case
if(${_condition})
- set(PCLCONFIG_OPTIONAL_DEPENDENCIES "${PCLCONFIG_OPTIONAL_DEPENDENCIES}${_opt_dep} ")
+ string(APPEND PCLCONFIG_OPTIONAL_DEPENDENCIES "${_opt_dep} ")
endif()
endforeach()
- set(PCLCONFIG_OPTIONAL_DEPENDENCIES "${PCLCONFIG_OPTIONAL_DEPENDENCIES})\n")
+ string(APPEND PCLCONFIG_OPTIONAL_DEPENDENCIES ")\n")
endif()
#look for subsystems
foreach(_sub ${${PCL_SUBSYS_SUBSYS}})
PCL_GET_SUBSUBSYS_STATUS(_sub_status ${_ss} ${_sub})
if(_sub_status)
- set(PCLCONFIG_AVAILABLE_COMPONENTS "${PCLCONFIG_AVAILABLE_COMPONENTS} ${_sub}")
- set(PCLCONFIG_AVAILABLE_COMPONENTS_LIST "${PCLCONFIG_AVAILABLE_COMPONENTS_LIST}\n# - ${_sub}")
+ string(APPEND PCLCONFIG_AVAILABLE_COMPONENTS " ${_sub}")
+ string(APPEND PCLCONFIG_AVAILABLE_COMPONENTS_LIST "\n# - ${_sub}")
GET_IN_MAP(_deps PCL_SUBSYS_DEPS ${_ss}_${sub})
if(_deps)
- set(PCLCONFIG_INTERNAL_DEPENDENCIES "${PCLCONFIG_INTERNAL_DEPENDENCIES}set(pcl_${_sub}_int_dep ")
+ string(APPEND PCLCONFIG_INTERNAL_DEPENDENCIES "set(pcl_${_sub}_int_dep ")
foreach(_dep ${_deps})
- set(PCLCONFIG_INTERNAL_DEPENDENCIES "${PCLCONFIG_INTERNAL_DEPENDENCIES}${_dep} ")
+ string(APPEND PCLCONFIG_INTERNAL_DEPENDENCIES "${_dep} ")
endforeach()
- set(PCLCONFIG_INTERNAL_DEPENDENCIES "${PCLCONFIG_INTERNAL_DEPENDENCIES})\n")
+ string(APPEND PCLCONFIG_INTERNAL_DEPENDENCIES ")\n")
endif()
endif()
endforeach()
#Boost modules
set(PCLCONFIG_AVAILABLE_BOOST_MODULES "system filesystem date_time iostreams")
if(Boost_SERIALIZATION_FOUND)
- set(PCLCONFIG_AVAILABLE_BOOST_MODULES "${PCLCONFIG_AVAILABLE_BOOST_MODULES} serialization")
+ string(APPEND PCLCONFIG_AVAILABLE_BOOST_MODULES " serialization")
endif()
if(Boost_CHRONO_FOUND)
- set(PCLCONFIG_AVAILABLE_BOOST_MODULES "${PCLCONFIG_AVAILABLE_BOOST_MODULES} chrono")
+ string(APPEND PCLCONFIG_AVAILABLE_BOOST_MODULES " chrono")
endif()
configure_file("${PCL_SOURCE_DIR}/PCLConfig.cmake.in"
if(SUBSYS_EXT_DEPS)
foreach(_dep ${SUBSYS_EXT_DEPS})
string(TOUPPER "${_dep}_found" EXT_DEP_FOUND)
- if(NOT EXT_DEP_FOUND)
+      # Variable EXT_DEP_FOUND expands to e.g. QHULL_FOUND, which is then used to check whether the external dependency was found.
+ if(NOT ${EXT_DEP_FOUND})
set(${_var} FALSE)
PCL_SET_SUBSYS_STATUS(${_name} FALSE "Requires external library ${_dep}.")
endif()
if(SUBSUBSYS_EXT_DEPS)
foreach(_dep ${SUBSUBSYS_EXT_DEPS})
string(TOUPPER "${_dep}_found" EXT_DEP_FOUND)
- if(NOT EXT_DEP_FOUND)
+ if(NOT ${EXT_DEP_FOUND})
set(${_var} FALSE)
PCL_SET_SUBSYS_STATUS(${_parent}_${_name} FALSE "Requires external library ${_dep}.")
endif()
add_dependencies(tests ${_exename})
endmacro()
+###############################################################################
+# Add a benchmark target.
+# _name The benchmark name.
+# ARGN :
+# FILES the source files for the benchmark
+# ARGUMENTS Arguments for benchmark executable
+# LINK_WITH link benchmark executable with libraries
+function(PCL_ADD_BENCHMARK _name)
+ set(options)
+ set(oneValueArgs)
+ set(multiValueArgs FILES ARGUMENTS LINK_WITH)
+ cmake_parse_arguments(PCL_ADD_BENCHMARK "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+ add_executable(benchmark_${_name} ${PCL_ADD_BENCHMARK_FILES})
+ set_target_properties(benchmark_${_name} PROPERTIES FOLDER "Benchmarks")
+ target_link_libraries(benchmark_${_name} benchmark::benchmark ${PCL_ADD_BENCHMARK_LINK_WITH})
+ set_target_properties(benchmark_${_name} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+
+  # Only applies to MSVC
+  if(MSVC)
+    # Requires CMake 3.13.0 or newer
+ get_target_property(BenchmarkArgumentWarningShown run_benchmarks PCL_BENCHMARK_ARGUMENTS_WARNING_SHOWN)
+ if(CMAKE_VERSION VERSION_LESS "3.13.0" AND (NOT BenchmarkArgumentWarningShown))
+ message(WARNING "Arguments for benchmark projects are not added - this requires at least CMake 3.13. Can be added manually in \"Project settings -> Debugging -> Command arguments\"")
+ set_target_properties(run_benchmarks PROPERTIES PCL_BENCHMARK_ARGUMENTS_WARNING_SHOWN TRUE)
+ else()
+ #Only add if there are arguments to test
+ if(PCL_ADD_BENCHMARK_ARGUMENTS)
+ string (REPLACE ";" " " PCL_ADD_BENCHMARK_ARGUMENTS_STR "${PCL_ADD_BENCHMARK_ARGUMENTS}")
+ set_target_properties(benchmark_${_name} PROPERTIES VS_DEBUGGER_COMMAND_ARGUMENTS ${PCL_ADD_BENCHMARK_ARGUMENTS_STR})
+ endif()
+ endif()
+ endif()
+
+ add_custom_target(run_benchmark_${_name} benchmark_${_name} ${PCL_ADD_BENCHMARK_ARGUMENTS})
+ set_target_properties(run_benchmark_${_name} PROPERTIES FOLDER "Benchmarks")
+
+ add_dependencies(run_benchmarks run_benchmark_${_name})
+endfunction()
+
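+# A hedged usage sketch (the name and file below are placeholders, not taken from the tree):
+#
+#   PCL_ADD_BENCHMARK(my_bench
+#                     FILES my_bench.cpp
+#                     ARGUMENTS --benchmark_min_time=1
+#                     LINK_WITH pcl_common)
+#
+# This defines the executable target benchmark_my_bench and a run_benchmark_my_bench
+# custom target that is hooked into the global run_benchmarks target.
+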
###############################################################################
# Add an example target.
# _name The example name.
set(PKG_LIBFLAGS ${PKGCONFIG_LIB_FLAGS})
LIST_TO_STRING(PKG_EXTERNAL_DEPS "${PKGCONFIG_EXT_DEPS}")
foreach(_dep ${PKGCONFIG_PCL_DEPS})
- set(PKG_EXTERNAL_DEPS "${PKG_EXTERNAL_DEPS} pcl_${_dep}-${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}")
+ string(APPEND PKG_EXTERNAL_DEPS " pcl_${_dep}-${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}")
endforeach()
set(PKG_INTERNAL_DEPS "")
foreach(_dep ${PKGCONFIG_INT_DEPS})
- set(PKG_INTERNAL_DEPS "${PKG_INTERNAL_DEPS} -l${_dep}")
+ string(APPEND PKG_INTERNAL_DEPS " -l${_dep}")
endforeach()
set(_pc_file ${CMAKE_CURRENT_BINARY_DIR}/${_name}-${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}.pc)
###############################################################################
# Write a report on the build/not-build status of the subsystems
macro(PCL_WRITE_STATUS_REPORT)
+ message(STATUS "PCL build with following flags:")
+ message(STATUS "${CMAKE_CXX_FLAGS}")
message(STATUS "The following subsystems will be built:")
foreach(_ss ${PCL_SUBSYSTEMS})
PCL_GET_SUBSYS_STATUS(_status ${_ss})
foreach(_sub ${${PCL_SUBSYS_SUBSYS}})
PCL_GET_SUBSYS_STATUS(_sub_status ${_ss}_${_sub})
if(_sub_status)
- set(will_build "${will_build}\n |_ ${_sub}")
+ string(APPEND will_build "\n |_ ${_sub}")
endif()
endforeach()
if(NOT ("${will_build}" STREQUAL ""))
- set(message_text "${message_text}\n building: ${will_build}")
+ string(APPEND message_text "\n building: ${will_build}")
endif()
set(wont_build)
foreach(_sub ${${PCL_SUBSYS_SUBSYS}})
PCL_GET_SUBSYS_HYPERSTATUS(_sub_hyper_status ${_ss}_${sub})
if(NOT _sub_status OR ("${_sub_hyper_status}" STREQUAL "AUTO_OFF"))
GET_IN_MAP(_reason PCL_SUBSYS_REASONS ${_ss}_${_sub})
- set(wont_build "${wont_build}\n |_ ${_sub}: ${_reason}")
+ string(APPEND wont_build "\n |_ ${_sub}: ${_reason}")
endif()
endforeach()
if(NOT ("${wont_build}" STREQUAL ""))
- set(message_text "${message_text}\n not building: ${wont_build}")
+ string(APPEND message_text "\n not building: ${wont_build}")
endif()
endif()
message(STATUS "${message_text}")
endif()
set(DOC_SOURCE_DIR "\"${CMAKE_CURRENT_SOURCE_DIR}\"\\")
foreach(dep ${dependencies})
- set(DOC_SOURCE_DIR
- "${DOC_SOURCE_DIR}\n\t\t\t\t\t\t\t\t\t\t\t\t \"${PCL_SOURCE_DIR}/${dep}\"\\")
+ string(APPEND DOC_SOURCE_DIR "\n\t\t\t\t\t\t\t\t\t\t\t\t \"${PCL_SOURCE_DIR}/${dep}\"\\")
endforeach()
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/html")
set(doxyfile "${CMAKE_CURRENT_BINARY_DIR}/doxyfile")
#pragma once
-#include <string>
#include <vector>
#include <ostream>
#include <ostream> // for ostream
#include <pcl/PCLHeader.h> // for PCLHeader
+#include <pcl/types.h> //for index_t
namespace pcl
{
{
::pcl::PCLHeader header;
- std::uint32_t height = 0;
- std::uint32_t width = 0;
+ uindex_t height = 0;
+ uindex_t width = 0;
std::string encoding;
std::uint8_t is_bigendian = 0;
- std::uint32_t step = 0;
+ uindex_t step = 0;
std::vector<std::uint8_t> data;
#include <pcl/pcl_macros.h> // for PCL_EXPORTS
#include <pcl/PCLHeader.h>
#include <pcl/PCLPointField.h>
+#include <pcl/types.h>
namespace pcl
{
{
::pcl::PCLHeader header;
- std::uint32_t height = 0;
- std::uint32_t width = 0;
+ uindex_t height = 0;
+ uindex_t width = 0;
std::vector<::pcl::PCLPointField> fields;
static_assert(BOOST_ENDIAN_BIG_BYTE || BOOST_ENDIAN_LITTLE_BYTE, "unable to determine system endianness");
std::uint8_t is_bigendian = BOOST_ENDIAN_BIG_BYTE;
- std::uint32_t point_step = 0;
- std::uint32_t row_step = 0;
+ uindex_t point_step = 0;
+ uindex_t row_step = 0;
std::vector<std::uint8_t> data;
#include <pcl/memory.h> // for shared_ptr
#include <pcl/type_traits.h> // for asEnum_v
+#include <pcl/types.h> // for index_t
#include <string> // for string
#include <ostream> // for ostream
{
std::string name;
- std::uint32_t offset = 0;
+ uindex_t offset = 0;
std::uint8_t datatype = 0;
- std::uint32_t count = 0;
+ uindex_t count = 0;
enum PointFieldTypes { INT8 = traits::asEnum_v<std::int8_t>,
UINT8 = traits::asEnum_v<std::uint8_t>,
#pragma once
-#include <string>
-#include <vector>
#include <ostream>
// Include the correct Header path here
#pragma once
#include <algorithm>
-#include <string>
#include <vector>
#include <ostream>
static bool
concatenate (pcl::PolygonMesh &mesh1, const pcl::PolygonMesh &mesh2)
{
+ const auto point_offset = mesh1.cloud.width * mesh1.cloud.height;
+
bool success = pcl::PCLPointCloud2::concatenate(mesh1.cloud, mesh2.cloud);
if (success == false) {
return false;
// Make the resultant polygon mesh take the newest stamp
mesh1.header.stamp = std::max(mesh1.header.stamp, mesh2.header.stamp);
- const auto point_offset = mesh1.cloud.width * mesh1.cloud.height;
std::transform(mesh2.polygons.begin (),
mesh2.polygons.end (),
std::back_inserter (mesh1.polygons),
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
+#include <pcl/types.h>
-#include <string>
-#include <vector>
#include <ostream>
namespace pcl
Vertices ()
{}
- std::vector<std::uint32_t> vertices;
+ Indices vertices;
public:
using Ptr = shared_ptr<Vertices>;
#ifdef __GNUC__
#pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#ifndef Q_MOC_RUN
// Marking all Boost headers as system headers to remove warnings
#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
+#include <type_traits> // for is_floating_point
+#include <array> // for std::array especially in Clang Darwin and MSVC
+
namespace pcl
{
using GlasbeyLUT = ColorLUT<pcl::LUT_GLASBEY>;
using ViridisLUT = ColorLUT<pcl::LUT_VIRIDIS>;
-}
+ /**
+ * @brief Returns a Look-Up Table useful in converting RGB to sRGB
+ * @tparam T floating point type with resultant value
+ * @tparam bits depth of RGB
+ * @return 1-D LUT for converting R, G or B into Rs, Gs or Bs
+ * @remarks sRGB was proposed by Stokes et al. as a uniform default color
+ * space for the internet
+ * M. Stokes, M. Anderson, S. Chandrasekar, and R. Motta: A standard default colorspace for the internet - sRGB (Nov 1996)
+ * IEC 61966-2.1 Default RGB Colour Space - sRGB (International Electrotechnical Commission, Geneva, Switzerland, 1999)
+ * www.srgb.com, www.color.org/srgb.html
+ */
+ template <typename T, std::uint8_t bits = 8>
+ PCL_EXPORTS inline std::array<T, 1 << bits>
+ RGB2sRGB_LUT() noexcept
+ {
+ static_assert(std::is_floating_point<T>::value, "LUT value must be a floating point");
+
+ constexpr const std::size_t size = 1 << bits;
+
+ static const auto sRGB_LUT = [&]() {
+ // MSVC wouldn't take `size` here instead of the expression
+ std::array<T, 1 << bits> LUT;
+ for (std::size_t i = 0; i < size; ++i) {
+ T f = static_cast<T>(i) / static_cast<T>(size - 1);
+ if (f > 0.04045) {
+ // ((f + 0.055)/1.055)^2.4
+ LUT[i] = static_cast<T>(
+ std::pow((f + static_cast<T>(0.055)) / static_cast<T>(1.055),
+ static_cast<T>(2.4)));
+ }
+ else {
+ // f / 12.92
+ LUT[i] = f / static_cast<T>(12.92);
+ }
+ }
+ return LUT;
+ }();
+ return sRGB_LUT;
+ }
+
+ /**
+ * @brief Returns a Look-Up Table useful in converting scaled CIE XYZ into CIE L*a*b*
+ * @details The function assumes that the XYZ values are
+ * * not normalized using reference illuminant
+ * * scaled such that reference illuminant has Xn = Yn = Zn = discretizations
+ * @tparam T floating point type with resultant value
+ * @tparam discretizations number of levels for the LUT
+ * @return 1-D LUT with results of f(X/Xn)
+ * @note This function doesn't convert from CIE XYZ to CIE L*a*b*. The actual conversion
+ * is as follows:
+ * L* = 116 * [f(Y/Yn) - 16/116]
+ * a* = 500 * [f(X/Xn) - f(Y/Yn)]
+ * b* = 200 * [f(Y/Yn) - f(Z/Zn)]
+ * Where, Xn, Yn and Zn are values of the reference illuminant (at prescribed angle)
+ * f is appropriate function such that L* = 100, a* = b* = 0 for white color
+ * Reference: Billmeyer and Saltzman’s Principles of Color Technology
+ */
+ template <typename T, std::size_t discretizations = 4000>
+ PCL_EXPORTS inline const std::array<T, discretizations>&
+ XYZ2LAB_LUT() noexcept
+ {
+ static_assert(std::is_floating_point<T>::value, "LUT value must be a floating point");
+
+ static const auto f_LUT = [&]() {
+ std::array<T, discretizations> LUT;
+ for (std::size_t i = 0; i < discretizations; ++i) {
+ T f = static_cast<T>(i) / static_cast<T>(discretizations);
+ if (f > static_cast<T>(0.008856)) {
+ // f^(1/3)
+ LUT[i] = static_cast<T>(std::pow(f, (static_cast<T>(1) / static_cast<T>(3))));
+ }
+ else {
+ // 7.87 * f + 16/116
+ LUT[i] =
+ static_cast<T>(7.87) * f + (static_cast<T>(16) / static_cast<T>(116));
+ }
+ }
+ return LUT;
+ }();
+ return f_LUT;
+ }
+} // namespace pcl
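+
+// A minimal usage sketch (illustrative only; `r` is a placeholder 8-bit channel value):
+//
+//   const auto lut = pcl::RGB2sRGB_LUT<float>();  // 256-entry table for 8-bit channels
+//   const std::uint8_t r = 128;
+//   const float r_converted = lut[r];             // looked-up value in [0, 1] for channel r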
#pragma once
-#include <pcl/pcl_base.h>
+#ifdef __SSE__
+#include <xmmintrin.h> // for __m128
+#endif // ifdef __SSE__
+#ifdef __AVX__
+#include <immintrin.h> // for __m256
+#endif // ifdef __AVX__
+
+#include <pcl/point_cloud.h> // for PointCloud
+#include <pcl/PointIndices.h> // for PointIndices
+namespace pcl { struct PCLPointCloud2; }
/**
* \file pcl/common/common.h
/** \brief Compute the smallest angle between two 3D vectors in radians (default) or degree.
* \param v1 the first 3D vector (represented as a \a Eigen::Vector4f)
* \param v2 the second 3D vector (represented as a \a Eigen::Vector4f)
+ * \param in_degree if true, the angle is returned in degrees, otherwise in radians
* \return the angle between v1 and v2 in radians or degrees
* \note Handles rounding error for parallel and anti-parallel vectors
* \ingroup common
inline double
getAngle3D (const Eigen::Vector3f &v1, const Eigen::Vector3f &v2, const bool in_degree = false);
+#ifdef __SSE__
+ /** \brief Compute the approximate arccosine of four values at once using SSE instructions.
+ *
+ * The approximation used is \f$ (1.59121552+x*(-0.15461442+x*0.05354897))*\sqrt{0.89286965-0.89282669*x}+0.06681017+x*(-0.09402311+x*0.02708663) \f$.
+ * The average error is 0.00012 rad. This approximation is more accurate than other approximations of acos, but also uses a few more operations.
+ * \param x four floats, each should be in [0; 1]. They must not be greater than 1 since acos is undefined there.
+ * They should not be less than 0 because there the approximation is less precise
+ * \return the four arccosines, each in [0; pi/2]
+ * \ingroup common
+ */
+ inline __m128
+ acos_SSE (const __m128 &x);
+
+ /** \brief Similar to getAngle3D, but four times in parallel using SSE instructions.
+ *
+ * This behaves like \f$ min(getAngle3D(dot_product), \pi-getAngle3D(dot_product)) \f$.
+ * All vectors must be normalized (length is 1.0).
+ * Since an approximate acos is used, the results may be slightly imprecise.
+   * \param[in] x1 the x components of the first four vectors
+   * \param[in] y1 the y components of the first four vectors
+   * \param[in] z1 the z components of the first four vectors
+   * \param[in] x2 the x components of the second four vectors
+   * \param[in] y2 the y components of the second four vectors
+   * \param[in] z2 the z components of the second four vectors
+ * \return the four angles in radians in [0; pi/2]
+ * \ingroup common
+ */
+ inline __m128
+ getAcuteAngle3DSSE (const __m128 &x1, const __m128 &y1, const __m128 &z1, const __m128 &x2, const __m128 &y2, const __m128 &z2);
+#endif // ifdef __SSE__
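+
+  // A minimal usage sketch (illustrative only; assumes __SSE__ is defined and the vectors
+  // below are placeholders that are already normalized):
+  //
+  //   __m128 x1 = _mm_set1_ps (1.0f), y1 = _mm_setzero_ps (), z1 = _mm_setzero_ps ();
+  //   __m128 x2 = _mm_setzero_ps (), y2 = _mm_set1_ps (1.0f), z2 = _mm_setzero_ps ();
+  //   __m128 angles = pcl::getAcuteAngle3DSSE (x1, y1, z1, x2, y2, z2); // four angles, each ~pi/2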
+
+#ifdef __AVX__
+ /** \brief Compute the approximate arccosine of eight values at once using AVX instructions.
+ *
+ * The approximation used is \f$ (1.59121552+x*(-0.15461442+x*0.05354897))*\sqrt{0.89286965-0.89282669*x}+0.06681017+x*(-0.09402311+x*0.02708663) \f$.
+ * The average error is 0.00012 rad. This approximation is more accurate than other approximations of acos, but also uses a few more operations.
+ * \param x eight floats, each should be in [0; 1]. They must not be greater than 1 since acos is undefined there.
+ * They should not be less than 0 because there the approximation is less precise
+ * \return the eight arccosines, each in [0; pi/2]
+ * \ingroup common
+ */
+ inline __m256
+ acos_AVX (const __m256 &x);
+
+ /** \brief Similar to getAngle3D, but eight times in parallel using AVX instructions.
+ *
+ * This behaves like \f$ min(getAngle3D(dot_product), \pi-getAngle3D(dot_product)) \f$.
+ * All vectors must be normalized (length is 1.0).
+ * Since an approximate acos is used, the results may be slightly imprecise.
+   * \param[in] x1 the x components of the first eight vectors
+   * \param[in] y1 the y components of the first eight vectors
+   * \param[in] z1 the z components of the first eight vectors
+   * \param[in] x2 the x components of the second eight vectors
+   * \param[in] y2 the y components of the second eight vectors
+   * \param[in] z2 the z components of the second eight vectors
+ * \return the eight angles in radians in [0; pi/2]
+ * \ingroup common
+ */
+ inline __m256
+ getAcuteAngle3DAVX (const __m256 &x1, const __m256 &y1, const __m256 &z1, const __m256 &x2, const __m256 &y2, const __m256 &z2);
+#endif // ifdef __AVX__
/** \brief Compute both the mean and the standard deviation of an array of values
* \param values the array of values
const Eigen::Vector4f &pivot_pt, Eigen::Vector4f &max_pt);
/** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
- * \param cloud the point cloud data message
- * \param min_pt the resultant minimum bounds
- * \param max_pt the resultant maximum bounds
+ * \param[in] cloud the point cloud data message
+ * \param[out] min_pt the resultant minimum bounds
+ * \param[out] max_pt the resultant maximum bounds
* \ingroup common
*/
template <typename PointT> inline void
getMinMax3D (const pcl::PointCloud<PointT> &cloud, PointT &min_pt, PointT &max_pt);
/** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
- * \param cloud the point cloud data message
- * \param min_pt the resultant minimum bounds
- * \param max_pt the resultant maximum bounds
+ * \param[in] cloud the point cloud data message
+ * \param[out] min_pt the resultant minimum bounds
+ * \param[out] max_pt the resultant maximum bounds
* \ingroup common
*/
template <typename PointT> inline void
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
/** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
- * \param cloud the point cloud data message
- * \param indices the vector of point indices to use from \a cloud
- * \param min_pt the resultant minimum bounds
- * \param max_pt the resultant maximum bounds
+ * \param[in] cloud the point cloud data message
+ * \param[in] indices the vector of point indices to use from \a cloud
+ * \param[out] min_pt the resultant minimum bounds
+ * \param[out] max_pt the resultant maximum bounds
* \ingroup common
*/
template <typename PointT> inline void
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
/** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
- * \param cloud the point cloud data message
- * \param indices the vector of point indices to use from \a cloud
- * \param min_pt the resultant minimum bounds
- * \param max_pt the resultant maximum bounds
+ * \param[in] cloud the point cloud data message
+ * \param[in] indices the vector of point indices to use from \a cloud
+ * \param[out] min_pt the resultant minimum bounds
+ * \param[out] max_pt the resultant maximum bounds
* \ingroup common
*/
template <typename PointT> inline void
PCL_EXPORTS void
getMeanStdDev (const std::vector<float> &values, double &mean, double &stddev);
+ /** \brief Compute the median of a list of values (fast). If the number of values is even, take the mean of the two middle values.
+ * This function can be used like this:
+ * \code{.cpp}
+ * std::vector<double> vector{1.0, 25.0, 9.0, 4.0, 16.0};
+ * const double median = pcl::computeMedian (vector.begin (), vector.end (), static_cast<double(*)(double)>(std::sqrt)); // = 3
+ * \endcode
+ * \param[in,out] begin,end Iterators that mark the beginning and end of the value range. These values will be reordered!
+   * \param[in] f A lambda, function pointer, or similar that is implicitly applied to all values before the median computation. In practice, it is applied lazily (i.e. at most twice) and therefore must not change the sorting order (e.g. monotonic functions like sqrt are allowed)
+ * \return the median
+ * \ingroup common
+ */
+ template<typename IteratorT, typename Functor> inline auto
+ computeMedian (IteratorT begin, IteratorT end, Functor f) noexcept -> typename std::result_of<Functor(decltype(*begin))>::type
+ {
+ const std::size_t size = std::distance(begin, end);
+ const std::size_t mid = size/2;
+ if (size%2==0)
+ { // Even number of values
+ std::nth_element (begin, begin + (mid-1), end);
+ return (f(begin[mid-1]) + f(*(std::min_element (begin + mid, end)))) / 2.0;
+ }
+ else
+ { // Odd number of values
+ std::nth_element (begin, begin + mid, end);
+ return f(begin[mid]);
+ }
+ }
+
+ /** \brief Compute the median of a list of values (fast). See the other overloaded function for more information.
+ */
+ template<typename IteratorT> inline auto
+ computeMedian (IteratorT begin, IteratorT end) noexcept -> typename std::iterator_traits<IteratorT>::value_type
+ {
+ return computeMedian (begin, end, [](const auto& x){return x;});
+ }
}
/*@}*/
#include <pcl/common/impl/common.hpp>
#pragma once
-#include <pcl/conversions.h>
-
#include <type_traits>
namespace pcl
# pragma disable_warn
#endif
-#include <cmath>
#include <pcl/ModelCoefficients.h>
#include <Eigen/StdVector>
-#include <Eigen/Core>
-#include <Eigen/Eigenvalues>
#include <Eigen/Geometry>
-#include <Eigen/SVD>
#include <Eigen/LU>
-#include <Eigen/Dense>
-#include <Eigen/Eigenvalues>
namespace pcl
{
template <typename Scalar> void
getEulerAngles (const Eigen::Transform<Scalar, 3, Eigen::Affine> &t, Scalar &roll, Scalar &pitch, Scalar &yaw);
- inline void
- getEulerAngles (const Eigen::Affine3f &t, float &roll, float &pitch, float &yaw)
- {
- getEulerAngles<float> (t, roll, pitch, yaw);
- }
-
- inline void
- getEulerAngles (const Eigen::Affine3d &t, double &roll, double &pitch, double &yaw)
- {
- getEulerAngles<double> (t, roll, pitch, yaw);
- }
-
/** Extract x,y,z and the Euler angles (XYZ-convention) from the given transformation
* \param[in] t the input transformation matrix
* \param[out] x the resulting x translation
Scalar &x, Scalar &y, Scalar &z,
Scalar &roll, Scalar &pitch, Scalar &yaw);
- inline void
- getTranslationAndEulerAngles (const Eigen::Affine3f &t,
- float &x, float &y, float &z,
- float &roll, float &pitch, float &yaw)
- {
- getTranslationAndEulerAngles<float> (t, x, y, z, roll, pitch, yaw);
- }
-
- inline void
- getTranslationAndEulerAngles (const Eigen::Affine3d &t,
- double &x, double &y, double &z,
- double &roll, double &pitch, double &yaw)
- {
- getTranslationAndEulerAngles<double> (t, x, y, z, roll, pitch, yaw);
- }
-
/** \brief Create a transformation from the given translation and Euler angles (XYZ-convention)
* \param[in] x the input x translation
* \param[in] y the input y translation
point_out = (transformation * point).template head<3> ();
}
- inline void
- transformPoint (const Eigen::Vector3f &point_in,
- Eigen::Vector3f &point_out,
- const Eigen::Affine3f &transformation)
- {
- transformPoint<float> (point_in, point_out, transformation);
- }
-
- inline void
- transformPoint (const Eigen::Vector3d &point_in,
- Eigen::Vector3d &point_out,
- const Eigen::Affine3d &transformation)
- {
- transformPoint<double> (point_in, point_out, transformation);
- }
-
/** \brief Transform a vector using an affine matrix
* \param[in] vector_in the vector to be transformed
* \param[out] vector_out the transformed vector
vector_out = transformation.linear () * vector_in;
}
- inline void
- transformVector (const Eigen::Vector3f &vector_in,
- Eigen::Vector3f &vector_out,
- const Eigen::Affine3f &transformation)
- {
- transformVector<float> (vector_in, vector_out, transformation);
- }
-
- inline void
- transformVector (const Eigen::Vector3d &vector_in,
- Eigen::Vector3d &vector_out,
- const Eigen::Affine3d &transformation)
- {
- transformVector<double> (vector_in, vector_out, transformation);
- }
-
/** \brief Transform a line using an affine matrix
* \param[in] line_in the line to be transformed
* \param[out] line_out the transformed line
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> &line_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transformation);
- inline bool
- transformLine (const Eigen::VectorXf &line_in,
- Eigen::VectorXf &line_out,
- const Eigen::Affine3f &transformation)
- {
- return (transformLine<float> (line_in, line_out, transformation));
- }
-
- inline bool
- transformLine (const Eigen::VectorXd &line_in,
- Eigen::VectorXd &line_out,
- const Eigen::Affine3d &transformation)
- {
- return (transformLine<double> (line_in, line_out, transformation));
- }
-
/** \brief Transform plane vectors using an affine matrix
* \param[in] plane_in the plane coefficients to be transformed
* \param[out] plane_out the transformed plane coefficients to fill
Eigen::Matrix<Scalar, 4, 1> &plane_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transformation);
- inline void
- transformPlane (const Eigen::Matrix<double, 4, 1> &plane_in,
- Eigen::Matrix<double, 4, 1> &plane_out,
- const Eigen::Transform<double, 3, Eigen::Affine> &transformation)
- {
- transformPlane<double> (plane_in, plane_out, transformation);
- }
-
- inline void
- transformPlane (const Eigen::Matrix<float, 4, 1> &plane_in,
- Eigen::Matrix<float, 4, 1> &plane_out,
- const Eigen::Transform<float, 3, Eigen::Affine> &transformation)
- {
- transformPlane<float> (plane_in, plane_out, transformation);
- }
-
/** \brief Transform plane vectors using an affine matrix
* \param[in] plane_in the plane coefficients to be transformed
* \param[out] plane_out the transformed plane coefficients to fill
* \warning ModelCoefficients stores floats only !
*/
template<typename Scalar> void
- transformPlane (const pcl::ModelCoefficients::Ptr plane_in,
+ transformPlane (const pcl::ModelCoefficients::ConstPtr plane_in,
pcl::ModelCoefficients::Ptr plane_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transformation);
- inline void
- transformPlane (const pcl::ModelCoefficients::Ptr plane_in,
- pcl::ModelCoefficients::Ptr plane_out,
- const Eigen::Transform<double, 3, Eigen::Affine> &transformation)
- {
- transformPlane<double> (plane_in, plane_out, transformation);
- }
-
- inline void
- transformPlane (const pcl::ModelCoefficients::Ptr plane_in,
- pcl::ModelCoefficients::Ptr plane_out,
- const Eigen::Transform<float, 3, Eigen::Affine> &transformation)
- {
- transformPlane<float> (plane_in, plane_out, transformation);
- }
-
/** \brief Check coordinate system integrity
* \param[in] line_x the first axis
* \param[in] line_y the second axis
const Scalar norm_limit = 1e-3,
const Scalar dot_limit = 1e-3);
- inline bool
- checkCoordinateSystem (const Eigen::Matrix<double, Eigen::Dynamic, 1> &line_x,
- const Eigen::Matrix<double, Eigen::Dynamic, 1> &line_y,
- const double norm_limit = 1e-3,
- const double dot_limit = 1e-3)
- {
- return (checkCoordinateSystem<double> (line_x, line_y, norm_limit, dot_limit));
- }
-
- inline bool
- checkCoordinateSystem (const Eigen::Matrix<float, Eigen::Dynamic, 1> &line_x,
- const Eigen::Matrix<float, Eigen::Dynamic, 1> &line_y,
- const float norm_limit = 1e-3,
- const float dot_limit = 1e-3)
- {
- return (checkCoordinateSystem<float> (line_x, line_y, norm_limit, dot_limit));
- }
-
/** \brief Check coordinate system integrity
* \param[in] origin the origin of the coordinate system
* \param[in] x_direction the first axis
const Eigen::Matrix<Scalar, Eigen::Dynamic, 1> to_line_y,
Eigen::Transform<Scalar, 3, Eigen::Affine> &transformation);
- inline bool
- transformBetween2CoordinateSystems (const Eigen::Matrix<double, Eigen::Dynamic, 1> from_line_x,
- const Eigen::Matrix<double, Eigen::Dynamic, 1> from_line_y,
- const Eigen::Matrix<double, Eigen::Dynamic, 1> to_line_x,
- const Eigen::Matrix<double, Eigen::Dynamic, 1> to_line_y,
- Eigen::Transform<double, 3, Eigen::Affine> &transformation)
- {
- return (transformBetween2CoordinateSystems<double> (from_line_x, from_line_y, to_line_x, to_line_y, transformation));
- }
-
- inline bool
- transformBetween2CoordinateSystems (const Eigen::Matrix<float, Eigen::Dynamic, 1> from_line_x,
- const Eigen::Matrix<float, Eigen::Dynamic, 1> from_line_y,
- const Eigen::Matrix<float, Eigen::Dynamic, 1> to_line_x,
- const Eigen::Matrix<float, Eigen::Dynamic, 1> to_line_y,
- Eigen::Transform<float, 3, Eigen::Affine> &transformation)
- {
- return (transformBetween2CoordinateSystems<float> (from_line_x, from_line_y, to_line_x, to_line_y, transformation));
- }
-
}
#include <pcl/common/impl/eigen.hpp>
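With the non-template float/double overloads removed above, callers rely on template argument deduction from the Eigen argument types. A minimal sketch (the line and transform values below are illustrative, not taken from this patch):

    Eigen::VectorXf line_in (6), line_out (6);
    line_in << 0.f, 0.f, 0.f,   // point on the line
               1.f, 0.f, 0.f;   // direction of the line
    const Eigen::Affine3f t (Eigen::Translation3f (0.f, 1.f, 0.f));
    pcl::transformLine (line_in, line_out, t);   // Scalar deduced as float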
#pragma once
-#include <pcl/common/eigen.h>
#include <pcl/point_cloud.h>
+#include <Eigen/Core> // for VectorXf
+
#include <functional>
namespace pcl
}
else
{
- PCL_WARN ("[pcl::randomOrthogonalAxis] provided axis has norm < 1E-8f");
+ PCL_WARN ("[pcl::randomOrthogonalAxis] provided axis has norm < 1E-8f\n");
}
rand_ortho_axis.normalize ();
// If the data is dense, we don't need to check for NaN
if (cloud.is_dense)
{
- for (const int& index : indices)
+ for (const auto& index : indices)
{
centroid[0] += cloud[index].x;
centroid[1] += cloud[index].y;
}
// NaN or Inf values could exist => check for them
unsigned cp = 0;
- for (const int& index : indices)
+ for (const auto& index : indices)
{
// Check if the point is invalid
if (!isFinite (cloud [index]))
{
point_count = 0;
// For each point in the cloud
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// Check if the point is invalid
if (!isFinite (cloud[index]))
if (cloud.is_dense)
{
point_count = static_cast<unsigned int> (indices.size ());
- for (const int &index : indices)
+ for (const auto &index : indices)
{
//const PointT& point = cloud[*iIt];
accu [0] += cloud[index].x * cloud[index].x;
else
{
point_count = 0;
- for (const int &index : indices)
+ for (const auto &index : indices)
{
if (!isFinite (cloud[index]))
continue;
if (cloud.is_dense)
{
point_count = indices.size ();
- for (const int &index : indices)
+ for (const auto &index : indices)
{
//const PointT& point = cloud[*iIt];
accu [0] += cloud[index].x * cloud[index].x;
else
{
point_count = 0;
- for (const int &index : indices)
+ for (const auto &index : indices)
{
if (!isFinite (cloud[index]))
continue;
pcl::CentroidPoint<PointInT> cp;
if (cloud.is_dense)
- for (const int &index : indices)
+ for (const auto &index : indices)
cp.add (cloud[index]);
else
- for (const int &index : indices)
+ for (const auto &index : indices)
if (pcl::isFinite (cloud[index]))
cp.add (cloud[index]);
return (in_degree ? std::acos (rad) * 180.0 / M_PI : std::acos (rad));
}
+#ifdef __SSE__
+inline __m128
+pcl::acos_SSE (const __m128 &x)
+{
+ /*
+ This python code generates the coefficients:
+ import math, numpy, scipy.optimize
+ def get_error(S):
+ err_sum=0.0
+ for x in numpy.arange(0.0, 1.0, 0.0025):
+ if (S[3]+S[4]*x)<0.0:
+ err_sum+=10.0
+ else:
+ err_sum+=((S[0]+x*(S[1]+x*S[2]))*numpy.sqrt(S[3]+S[4]*x)+S[5]+x*(S[6]+x*S[7])-math.acos(x))**2.0
+ return err_sum/400.0
+
+ print(scipy.optimize.minimize(fun=get_error, x0=[1.57, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0], method='Nelder-Mead', options={'maxiter':42000, 'maxfev':42000, 'disp':True, 'xatol':1e-6, 'fatol':1e-6}))
+ */
+ const __m128 mul_term = _mm_add_ps (_mm_set1_ps (1.59121552f), _mm_mul_ps (x, _mm_add_ps (_mm_set1_ps (-0.15461442f), _mm_mul_ps (x, _mm_set1_ps (0.05354897f)))));
+ const __m128 add_term = _mm_add_ps (_mm_set1_ps (0.06681017f), _mm_mul_ps (x, _mm_add_ps (_mm_set1_ps (-0.09402311f), _mm_mul_ps (x, _mm_set1_ps (0.02708663f)))));
+ return _mm_add_ps (_mm_mul_ps (mul_term, _mm_sqrt_ps (_mm_add_ps (_mm_set1_ps (0.89286965f), _mm_mul_ps (_mm_set1_ps (-0.89282669f), x)))), add_term);
+}
+
+inline __m128
+pcl::getAcuteAngle3DSSE (const __m128 &x1, const __m128 &y1, const __m128 &z1, const __m128 &x2, const __m128 &y2, const __m128 &z2)
+{
+ const __m128 dot_product = _mm_add_ps (_mm_add_ps (_mm_mul_ps (x1, x2), _mm_mul_ps (y1, y2)), _mm_mul_ps (z1, z2));
+ // The andnot-function realizes an abs-operation: the sign bit is removed
+ // -0.0f (negative zero) means that all bits are 0, only the sign bit is 1
+ return acos_SSE (_mm_min_ps (_mm_set1_ps (1.0f), _mm_andnot_ps (_mm_set1_ps (-0.0f), dot_product)));
+}
+#endif // ifdef __SSE__
+
+#ifdef __AVX__
+inline __m256
+pcl::acos_AVX (const __m256 &x)
+{
+ const __m256 mul_term = _mm256_add_ps (_mm256_set1_ps (1.59121552f), _mm256_mul_ps (x, _mm256_add_ps (_mm256_set1_ps (-0.15461442f), _mm256_mul_ps (x, _mm256_set1_ps (0.05354897f)))));
+ const __m256 add_term = _mm256_add_ps (_mm256_set1_ps (0.06681017f), _mm256_mul_ps (x, _mm256_add_ps (_mm256_set1_ps (-0.09402311f), _mm256_mul_ps (x, _mm256_set1_ps (0.02708663f)))));
+ return _mm256_add_ps (_mm256_mul_ps (mul_term, _mm256_sqrt_ps (_mm256_add_ps (_mm256_set1_ps (0.89286965f), _mm256_mul_ps (_mm256_set1_ps (-0.89282669f), x)))), add_term);
+}
+
+inline __m256
+pcl::getAcuteAngle3DAVX (const __m256 &x1, const __m256 &y1, const __m256 &z1, const __m256 &x2, const __m256 &y2, const __m256 &z2)
+{
+ const __m256 dot_product = _mm256_add_ps (_mm256_add_ps (_mm256_mul_ps (x1, x2), _mm256_mul_ps (y1, y2)), _mm256_mul_ps (z1, z2));
+ // The andnot-function realizes an abs-operation: the sign bit is removed
+ // -0.0f (negative zero) means that all bits are 0, only the sign bit is 1
+ return acos_AVX (_mm256_min_ps (_mm256_set1_ps (1.0f), _mm256_andnot_ps (_mm256_set1_ps (-0.0f), dot_product)));
+}
+#endif // ifdef __AVX__
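A rough usage sketch of the packed helpers above (only meaningful when the target supports SSE; the vectors are illustrative unit normals):

    #ifdef __SSE__
      // Four identical pairs of perpendicular unit vectors; each lane yields roughly pi/2.
      const __m128 x1 = _mm_set1_ps (1.f), y1 = _mm_set1_ps (0.f), z1 = _mm_set1_ps (0.f);
      const __m128 x2 = _mm_set1_ps (0.f), y2 = _mm_set1_ps (1.f), z2 = _mm_set1_ps (0.f);
      const __m128 angles = pcl::getAcuteAngle3DSSE (x1, y1, z1, x2, y2, z2);
      float out[4];
      _mm_storeu_ps (out, angles);   // each out[i] ~ 1.57, within the error of the acos approximation
    #endif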
+
//////////////////////////////////////////////////////////////////////////////////////////////
inline void
pcl::getMeanStd (const std::vector<float> &values, double &mean, double &stddev)
template <typename PointT> inline void
pcl::getMinMax3D (const pcl::PointCloud<PointT> &cloud, PointT &min_pt, PointT &max_pt)
{
- Eigen::Array4f min_p, max_p;
- min_p.setConstant (FLT_MAX);
- max_p.setConstant (-FLT_MAX);
-
- // If the data is dense, we don't need to check for NaN
- if (cloud.is_dense)
- {
- for (const auto& point: cloud.points)
- {
- const auto pt = point.getArray4fMap ();
- min_p = min_p.min (pt);
- max_p = max_p.max (pt);
- }
- }
- // NaN or Inf values could exist => check for them
- else
- {
- for (const auto& point: cloud.points)
- {
- // Check if the point is invalid
- if (!std::isfinite (point.x) ||
- !std::isfinite (point.y) ||
- !std::isfinite (point.z))
- continue;
- const auto pt = point.getArray4fMap ();
- min_p = min_p.min (pt);
- max_p = max_p.max (pt);
- }
- }
+ Eigen::Vector4f min_p, max_p;
+ pcl::getMinMax3D (cloud, min_p, max_p);
min_pt.x = min_p[0]; min_pt.y = min_p[1]; min_pt.z = min_p[2];
max_pt.x = max_p[0]; max_pt.y = max_p[1]; max_pt.z = max_p[2];
}
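The PointT overload above now simply delegates to the Eigen::Vector4f version; a minimal usage sketch (cloud contents are illustrative):

    pcl::PointCloud<pcl::PointXYZ> cloud;
    cloud.push_back (pcl::PointXYZ (0.f, 1.f, 2.f));
    cloud.push_back (pcl::PointXYZ (3.f, -1.f, 0.5f));
    pcl::PointXYZ min_pt, max_pt;
    pcl::getMinMax3D (cloud, min_pt, max_pt);   // min_pt = (0, -1, 0.5), max_pt = (3, 1, 2)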
template <typename PointT> inline void
pcl::getMinMax3D (const pcl::PointCloud<PointT> &cloud, Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt)
{
- Eigen::Array4f min_p, max_p;
- min_p.setConstant (FLT_MAX);
- max_p.setConstant (-FLT_MAX);
+ min_pt.setConstant (FLT_MAX);
+ max_pt.setConstant (-FLT_MAX);
// If the data is dense, we don't need to check for NaN
if (cloud.is_dense)
{
for (const auto& point: cloud.points)
{
- const auto pt = point.getArray4fMap ();
- min_p = min_p.min (pt);
- max_p = max_p.max (pt);
+ const pcl::Vector4fMapConst pt = point.getVector4fMap ();
+ min_pt = min_pt.cwiseMin (pt);
+ max_pt = max_pt.cwiseMax (pt);
}
}
// NaN or Inf values could exist => check for them
!std::isfinite (point.y) ||
!std::isfinite (point.z))
continue;
- const auto pt = point.getArray4fMap ();
- min_p = min_p.min (pt);
- max_p = max_p.max (pt);
+ const pcl::Vector4fMapConst pt = point.getVector4fMap ();
+ min_pt = min_pt.cwiseMin (pt);
+ max_pt = max_pt.cwiseMax (pt);
}
}
- min_pt = min_p;
- max_pt = max_p;
}
pcl::getMinMax3D (const pcl::PointCloud<PointT> &cloud, const pcl::PointIndices &indices,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt)
{
- Eigen::Array4f min_p, max_p;
- min_p.setConstant (FLT_MAX);
- max_p.setConstant (-FLT_MAX);
-
- // If the data is dense, we don't need to check for NaN
- if (cloud.is_dense)
- {
- for (const int &index : indices.indices)
- {
- pcl::Array4fMapConst pt = cloud[index].getArray4fMap ();
- min_p = min_p.min (pt);
- max_p = max_p.max (pt);
- }
- }
- // NaN or Inf values could exist => check for them
- else
- {
- for (const int &index : indices.indices)
- {
- // Check if the point is invalid
- if (!std::isfinite (cloud[index].x) ||
- !std::isfinite (cloud[index].y) ||
- !std::isfinite (cloud[index].z))
- continue;
- pcl::Array4fMapConst pt = cloud[index].getArray4fMap ();
- min_p = min_p.min (pt);
- max_p = max_p.max (pt);
- }
- }
- min_pt = min_p;
- max_pt = max_p;
+ pcl::getMinMax3D (cloud, indices.indices, min_pt, max_pt);
}
//////////////////////////////////////////////////////////////////////////////////////////////
// If the data is dense, we don't need to check for NaN
if (cloud.is_dense)
{
- for (const int &index : indices)
+ for (const auto &index : indices)
{
- pcl::Array4fMapConst pt = cloud[index].getArray4fMap ();
- min_pt = min_pt.array ().min (pt);
- max_pt = max_pt.array ().max (pt);
+ const pcl::Vector4fMapConst pt = cloud[index].getVector4fMap ();
+ min_pt = min_pt.cwiseMin (pt);
+ max_pt = max_pt.cwiseMax (pt);
}
}
// NaN or Inf values could exist => check for them
else
{
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// Check if the point is invalid
if (!std::isfinite (cloud[index].x) ||
!std::isfinite (cloud[index].y) ||
!std::isfinite (cloud[index].z))
continue;
- pcl::Array4fMapConst pt = cloud[index].getArray4fMap ();
- min_pt = min_pt.array ().min (pt);
- max_pt = max_pt.array ().max (pt);
+ const pcl::Vector4fMapConst pt = cloud[index].getVector4fMap ();
+ min_pt = min_pt.cwiseMin (pt);
+ max_pt = max_pt.cwiseMax (pt);
}
}
}
template <typename Scalar> void
-transformPlane (const pcl::ModelCoefficients::Ptr plane_in,
+transformPlane (const pcl::ModelCoefficients::ConstPtr plane_in,
pcl::ModelCoefficients::Ptr plane_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transformation)
{
Eigen::Matrix < Scalar, 4, 1 > v_plane_in (values.data ());
pcl::transformPlane (v_plane_in, v_plane_in, transformation);
plane_out->values.resize (4);
- std::copy_n(v_plane_in.data (), 4, plane_in->values.begin ());
+ std::copy_n(v_plane_in.data (), 4, plane_out->values.begin ());
}
#pragma once
#include <pcl/common/gaussian.h>
-#include <pcl/exceptions.h>
namespace pcl
{
{
output.width = input.width;
output.height = input.height;
- output.points.resize (input.height * input.width);
+ output.resize (input.height * input.width);
}
int i;
{
output.width = input.width;
output.height = input.height;
- output.points.resize (input.height * input.width);
+ output.resize (input.height * input.width);
}
int j;
}
};
+ template<>
+ struct IntensityFieldAccessor<pcl::PointXYZLAB>
+ {
+ inline float
+ operator () (const pcl::PointXYZLAB &p) const
+ {
+ return (p.L);
+ }
+
+ inline void
+ get (const pcl::PointXYZLAB &p, float &intensity) const
+ {
+ intensity = p.L;
+ }
+
+ inline void
+ set (pcl::PointXYZLAB &p, float intensity) const
+ {
+ p.L = intensity;
+ }
+
+ inline void
+ demean (pcl::PointXYZLAB& p, float value) const
+ {
+ p.L -= value;
+ }
+
+ inline void
+ add (pcl::PointXYZLAB& p, float value) const
+ {
+ p.L += value;
+ }
+ };
+
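A short sketch of the new accessor (assuming, like the neighbouring specializations in this header, that it lives in pcl::common):

    pcl::PointXYZLAB p;
    p.L = 50.f;
    pcl::common::IntensityFieldAccessor<pcl::PointXYZLAB> acc;
    const float intensity = acc (p);   // 50.f: the CIELAB lightness channel serves as intensity
    acc.add (p, 5.f);                  // p.L is now 55.f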
template<>
struct IntensityFieldAccessor<pcl::PointXYZHSV>
{
#pragma once
+#include <pcl/conversions.h> // for FieldAdder
#include <pcl/common/concatenate.h>
#include <pcl/common/copy_point.h>
#include <pcl/common/io.h>
return (result);
}
+namespace detail
+{
+
+ template <typename PointInT, typename PointOutT> void
+ copyPointCloudMemcpy (const pcl::PointCloud<PointInT> &cloud_in,
+ pcl::PointCloud<PointOutT> &cloud_out)
+ {
+    // Iterate over each point when the point types of the two clouds are different
+ for (std::size_t i = 0; i < cloud_in.size (); ++i)
+ copyPoint (cloud_in[i], cloud_out[i]);
+ }
+
+
+ template <typename PointT> void
+ copyPointCloudMemcpy (const pcl::PointCloud<PointT> &cloud_in,
+ pcl::PointCloud<PointT> &cloud_out)
+ {
+    // Use std::copy directly when the point types of the two clouds are the same
+ std::copy (&cloud_in[0], (&cloud_in[0]) + cloud_in.size (), &cloud_out[0]);
+ }
+
+} // namespace detail
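The overloads above replace the previous runtime isSamePointType branch with overload resolution; the public copyPointCloud behaves the same from the caller's point of view. A brief sketch (clouds are illustrative):

    pcl::PointCloud<pcl::PointXYZRGB> colored;        // filled elsewhere
    pcl::PointCloud<pcl::PointXYZ> positions;
    pcl::copyPointCloud (colored, positions);         // different point types: per-point copyPoint path
    pcl::PointCloud<pcl::PointXYZ> positions_copy;
    pcl::copyPointCloud (positions, positions_copy);  // same point type: contiguous std::copy path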
template <typename PointInT, typename PointOutT> void
copyPointCloud (const pcl::PointCloud<PointInT> &cloud_in,
cloud_out.is_dense = cloud_in.is_dense;
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
- cloud_out.points.resize (cloud_in.size ());
-
- if (cloud_in.points.empty ())
- return;
+ cloud_out.resize (cloud_in.size ());
- if (isSamePointType<PointInT, PointOutT> ())
- // Copy the whole memory block
- memcpy (&cloud_out[0], &cloud_in[0], cloud_in.size () * sizeof (PointInT));
- else
- // Iterate over each point
- for (std::size_t i = 0; i < cloud_in.size (); ++i)
- copyPoint (cloud_in[i], cloud_out[i]);
+ if (!cloud_in.empty ())
+ detail::copyPointCloudMemcpy (cloud_in, cloud_out);
}
}
// Allocate enough space and copy the basics
- cloud_out.points.resize (indices.size ());
+ cloud_out.clear ();
+ cloud_out.reserve (indices.size ());
cloud_out.header = cloud_in.header;
cloud_out.width = indices.size ();
cloud_out.height = 1;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
// Iterate over each point
- for (std::size_t i = 0; i < indices.size (); ++i)
- cloud_out[i] = cloud_in[indices[i]];
+ for (const auto& index : indices)
+ cloud_out.transient_push_back (cloud_in[index]);
}
pcl::PointCloud<PointOutT> &cloud_out)
{
// Allocate enough space and copy the basics
- cloud_out.points.resize (indices.size ());
+ cloud_out.resize (indices.size ());
cloud_out.header = cloud_in.header;
cloud_out.width = indices.size ();
cloud_out.height = 1;
const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out)
{
- // Do we want to copy everything?
- if (indices.indices.size () == cloud_in.size ())
- {
- cloud_out = cloud_in;
- return;
- }
-
- // Allocate enough space and copy the basics
- cloud_out.points.resize (indices.indices.size ());
- cloud_out.header = cloud_in.header;
- cloud_out.width = indices.indices.size ();
- cloud_out.height = 1;
- cloud_out.is_dense = cloud_in.is_dense;
- cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
- cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
-
- // Iterate over each point
- for (std::size_t i = 0; i < indices.indices.size (); ++i)
- cloud_out[i] = cloud_in[indices.indices[i]];
+ copyPointCloud (cloud_in, indices.indices, cloud_out);
}
const std::vector<pcl::PointIndices> &indices,
pcl::PointCloud<PointT> &cloud_out)
{
- int nr_p = 0;
+ std::size_t nr_p = 0;
for (const auto &index : indices)
nr_p += index.indices.size ();
}
// Allocate enough space and copy the basics
- cloud_out.points.resize (nr_p);
+ cloud_out.clear ();
+ cloud_out.reserve (nr_p);
cloud_out.header = cloud_in.header;
cloud_out.width = nr_p;
cloud_out.height = 1;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
// Iterate over each cluster
- int cp = 0;
for (const auto &cluster_index : indices)
{
// Iterate over each idx
for (const auto &index : cluster_index.indices)
{
// Iterate over each dimension
- cloud_out[cp] = cloud_in[index];
- cp++;
+ cloud_out.transient_push_back (cloud_in[index]);
}
}
}
}
// Allocate enough space and copy the basics
- cloud_out.points.resize (nr_p);
+ cloud_out.resize (nr_p);
cloud_out.header = cloud_in.header;
cloud_out.width = nr_p;
cloud_out.height = 1;
}
// Resize the output dataset
- cloud_out.points.resize (cloud1_in.size ());
+ cloud_out.resize (cloud1_in.size ());
cloud_out.header = cloud1_in.header;
cloud_out.width = cloud1_in.width;
cloud_out.height = cloud1_in.height;
#pragma once
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
+
#include <pcl/common/centroid.h>
#include <pcl/common/eigen.h>
#include <pcl/common/pca.h>
#pragma once
-#include <pcl/common/eigen.h>
#include <pcl/common/transformation_from_correspondences.h>
template <typename PointT, typename Scalar> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
- const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
+ const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields)
{
if (&cloud_in != &cloud_out)
{
cloud_out.header = cloud_in.header;
cloud_out.is_dense = cloud_in.is_dense;
- cloud_out.width = cloud_in.width;
- cloud_out.height = cloud_in.height;
- cloud_out.points.reserve (cloud_in.size ());
+ cloud_out.reserve (cloud_in.size ());
if (copy_all_fields)
- cloud_out.points.assign (cloud_in.points.begin (), cloud_in.points.end ());
+ cloud_out.assign (cloud_in.begin (), cloud_in.end (), cloud_in.width);
else
- cloud_out.points.resize (cloud_in.size ());
+ cloud_out.resize (cloud_in.width, cloud_in.height);
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
}
- pcl::detail::Transformer<Scalar> tf (transform.matrix ());
+ pcl::detail::Transformer<Scalar> tf (transform);
if (cloud_in.is_dense)
{
// If the dataset is dense, simply transform it!
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
- const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
+ const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields)
{
std::size_t npts = indices.size ();
cloud_out.header = cloud_in.header;
cloud_out.width = static_cast<int> (npts);
cloud_out.height = 1;
- cloud_out.points.resize (npts);
+ cloud_out.resize (npts);
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
- pcl::detail::Transformer<Scalar> tf (transform.matrix ());
+ pcl::detail::Transformer<Scalar> tf (transform);
if (cloud_in.is_dense)
{
// If the dataset is dense, simply transform it!
template <typename PointT, typename Scalar> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
- const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
+ const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields)
{
if (&cloud_in != &cloud_out)
{
// Note: could be replaced by cloud_out = cloud_in
cloud_out.header = cloud_in.header;
- cloud_out.width = cloud_in.width;
- cloud_out.height = cloud_in.height;
cloud_out.is_dense = cloud_in.is_dense;
- cloud_out.points.reserve (cloud_out.size ());
+ cloud_out.reserve (cloud_in.size ());
if (copy_all_fields)
- cloud_out.points.assign (cloud_in.points.begin (), cloud_in.points.end ());
+ cloud_out.assign (cloud_in.begin (), cloud_in.end (), cloud_in.width);
else
- cloud_out.points.resize (cloud_in.size ());
+ cloud_out.resize (cloud_in.width, cloud_in.height);
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
}
- pcl::detail::Transformer<Scalar> tf (transform.matrix ());
+ pcl::detail::Transformer<Scalar> tf (transform);
// If the data is dense, we don't need to check for NaN
if (cloud_in.is_dense)
{
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
- const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
+ const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields)
{
std::size_t npts = indices.size ();
cloud_out.header = cloud_in.header;
cloud_out.width = static_cast<int> (npts);
cloud_out.height = 1;
- cloud_out.points.resize (npts);
+ cloud_out.resize (npts);
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
- pcl::detail::Transformer<Scalar> tf (transform.matrix ());
+ pcl::detail::Transformer<Scalar> tf (transform);
// If the data is dense, we don't need to check for NaN
if (cloud_in.is_dense)
{
}
} // namespace pcl
-
#pragma once
+#include <pcl/common/eigen.h> // for computeRoots, eigen33
#include <pcl/common/vector_average.h>
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
namespace pcl
{
#include <numeric>
#include <string>
-#include <pcl/pcl_base.h>
+#include <pcl/point_cloud.h>
#include <pcl/PointIndices.h>
-#include <pcl/conversions.h>
-#include <pcl/exceptions.h>
#include <pcl/pcl_macros.h>
#include <pcl/PolygonMesh.h>
#include <locale>
return std::distance(cloud.fields.begin (), result);
}
- /** \brief Get the index of a specified field (i.e., dimension/channel)
- * \param[in] cloud the point cloud message
- * \param[in] field_name the string defining the field name
- * \param[out] fields a vector to the original \a PCLPointField vector that the raw PointCloud message contains
- * \ingroup common
- */
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "use getFieldIndex<PointT> (field_name, fields) instead")
- inline int
- getFieldIndex (const pcl::PointCloud<PointT> &cloud, const std::string &field_name,
- std::vector<pcl::PCLPointField> &fields);
-
/** \brief Get the index of a specified field (i.e., dimension/channel)
 * \tparam PointT datatype for which the fields are being queried
* \param[in] field_name the string defining the field name
getFieldIndex (const std::string &field_name,
const std::vector<pcl::PCLPointField> &fields);
- /** \brief Get the list of available fields (i.e., dimension/channel)
- * \param[in] cloud the point cloud message
- * \param[out] fields a vector to the original \a PCLPointField vector that the raw PointCloud message contains
- * \ingroup common
- */
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "use getFields<PointT> () with return value instead")
- inline void
- getFields (const pcl::PointCloud<PointT> &cloud, std::vector<pcl::PCLPointField> &fields);
-
- /** \brief Get the list of available fields (i.e., dimension/channel)
- * \tparam PointT datatype whose details are requested
- * \param[out] fields a vector to the original \a PCLPointField vector that the raw PointCloud message contains
- * \ingroup common
- */
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "use getFields<PointT> () with return value instead")
- inline void
- getFields (std::vector<pcl::PCLPointField> &fields);
-
/** \brief Get the list of available fields (i.e., dimension/channel)
* \tparam PointT datatype whose details are requested
* \ingroup common
/** \brief Concatenate two pcl::PCLPointCloud2
*
- * \warning This function subtly differs from the deprecated concatenatePointCloud()
- * The difference is that this function will concatenate IFF the non-skip fields
- * are in the correct order and same in number. The deprecated function skipped
- * fields even if both clouds didn't agree on the number of output fields
+ * \warning This function will concatenate IFF the non-skip fields are in the correct
+   * order and equal in number.
* \param[in] cloud1 the first input point cloud dataset
* \param[in] cloud2 the second input point cloud dataset
* \param[out] cloud_out the resultant output point cloud dataset
return pcl::PolygonMesh::concatenate(mesh1, mesh2, mesh_out);
}
- /** \brief Concatenate two pcl::PCLPointCloud2
- * \param[in] cloud1 the first input point cloud dataset
- * \param[in] cloud2 the second input point cloud dataset
- * \param[out] cloud_out the resultant output point cloud dataset
- * \return true if successful, false otherwise (e.g., name/number of fields differs)
- * \ingroup common
- */
- PCL_DEPRECATED(1, 12, "use pcl::concatenate() instead, but beware of subtle difference in behavior (see documentation)")
- PCL_EXPORTS bool
- concatenatePointCloud (const pcl::PCLPointCloud2 &cloud1,
- const pcl::PCLPointCloud2 &cloud2,
- pcl::PCLPointCloud2 &cloud_out);
-
/** \brief Extract the indices of a given point cloud as a new point cloud
* \param[in] cloud_in the input point cloud dataset
* \param[in] indices the vector of indices representing the points to be copied from \a cloud_in
*/
PCL_EXPORTS void
copyPointCloud (const pcl::PCLPointCloud2 &cloud_in,
- const IndicesAllocator< Eigen::aligned_allocator<int> > &indices,
+ const IndicesAllocator< Eigen::aligned_allocator<index_t> > &indices,
pcl::PCLPointCloud2 &cloud_out);
/** \brief Copy fields and point cloud data from \a cloud_in to \a cloud_out
* \note Assumes unique indices.
* \ingroup common
*/
- template <typename PointT, typename IndicesVectorAllocator = std::allocator<int>> void
+ template <typename PointT, typename IndicesVectorAllocator = std::allocator<index_t>> void
copyPointCloud (const pcl::PointCloud<PointT> &cloud_in,
const IndicesAllocator< IndicesVectorAllocator> &indices,
pcl::PointCloud<PointT> &cloud_out);
* \note Assumes unique indices.
* \ingroup common
*/
- template <typename PointInT, typename PointOutT, typename IndicesVectorAllocator = std::allocator<int>> void
+ template <typename PointInT, typename PointOutT, typename IndicesVectorAllocator = std::allocator<index_t>> void
copyPointCloud (const pcl::PointCloud<PointInT> &cloud_in,
const IndicesAllocator<IndicesVectorAllocator> &indices,
pcl::PointCloud<PointOutT> &cloud_out);
template<> inline bool isFinite<pcl::BRISKSignature512>(const pcl::BRISKSignature512&) { return (true); }
template<> inline bool isFinite<pcl::BorderDescription>(const pcl::BorderDescription &) { return true; }
template<> inline bool isFinite<pcl::Boundary>(const pcl::Boundary&) { return (true); }
+ template<> inline bool isFinite<pcl::CPPFSignature>(const pcl::CPPFSignature&) { return (true); }
template<> inline bool isFinite<pcl::ESFSignature640>(const pcl::ESFSignature640&) { return (true); }
template<> inline bool isFinite<pcl::FPFHSignature33>(const pcl::FPFHSignature33&) { return (true); }
+ template<> inline bool isFinite<pcl::GASDSignature512>(const pcl::GASDSignature512&) { return (true); }
+ template<> inline bool isFinite<pcl::GASDSignature984>(const pcl::GASDSignature984&) { return (true); }
+ template<> inline bool isFinite<pcl::GASDSignature7992>(const pcl::GASDSignature7992&) { return (true); }
+ template<> inline bool isFinite<pcl::GRSDSignature21>(const pcl::GRSDSignature21&) { return (true); }
template<> inline bool isFinite<pcl::Intensity>(const pcl::Intensity&) { return (true); }
template<> inline bool isFinite<pcl::IntensityGradient>(const pcl::IntensityGradient&) { return (true); }
template<> inline bool isFinite<pcl::Label>(const pcl::Label&) { return (true); }
template<> inline bool isFinite<pcl::PFHRGBSignature250>(const pcl::PFHRGBSignature250&) { return (true); }
template<> inline bool isFinite<pcl::PFHSignature125>(const pcl::PFHSignature125&) { return (true); }
template<> inline bool isFinite<pcl::PPFRGBSignature>(const pcl::PPFRGBSignature&) { return (true); }
- template<> inline bool isFinite<pcl::PPFSignature>(const pcl::PPFSignature&) { return (true); }
+
+ template<> inline bool isFinite<pcl::PPFSignature>(const pcl::PPFSignature& pt)
+ {
+ return std::isfinite(pt.f1) && std::isfinite(pt.f2) && std::isfinite(pt.f3) && std::isfinite(pt.f4) && std::isfinite(pt.alpha_m);
+ }
+
template<> inline bool isFinite<pcl::PrincipalCurvatures>(const pcl::PrincipalCurvatures&) { return (true); }
template<> inline bool isFinite<pcl::PrincipalRadiiRSD>(const pcl::PrincipalRadiiRSD&) { return (true); }
template<> inline bool isFinite<pcl::RGB>(const pcl::RGB&) { return (true); }
};
/** \brief UniformGenerator class generates a random number from range [min, max] at each run picked
- * according to a uniform distribution i.e eaach number within [min, max] has almost the same
+    * according to a uniform distribution, i.e. each number within [min, max] has almost the same
* probability of being drawn.
*
* \author Nizar Sallem
#pragma once
#include <pcl/pcl_macros.h>
-#ifndef Q_MOC_RUN
+
#include <boost/signals2.hpp>
-#endif
#include <condition_variable>
#include <functional>
#pragma once
-#include <pcl/common/eigen.h>
+#include <Eigen/Core> // for Vector3f, Matrix
+#include <Eigen/Geometry> // for Affine3f
namespace pcl
{
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
- bool copy_all_fields = true);
+ bool copy_all_fields = true)
+ {
+ return (transformPointCloud<PointT, Scalar> (cloud_in, cloud_out, transform.matrix (), copy_all_fields));
+ }
template <typename PointT> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
{
- return (transformPointCloud<PointT, float> (cloud_in, cloud_out, transform, copy_all_fields));
+ return (transformPointCloud<PointT, float> (cloud_in, cloud_out, transform.matrix (), copy_all_fields));
}
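These wrappers now forward the affine transform as its 4x4 matrix to the implementations above; a minimal usage sketch (translation-only transform chosen for illustration):

    pcl::PointCloud<pcl::PointXYZ> in, out;            // 'in' filled elsewhere
    Eigen::Affine3f t = Eigen::Affine3f::Identity ();
    t.translation () << 1.f, 0.f, 0.f;
    pcl::transformPointCloud (in, out, t);             // affine overload forwards t.matrix ()
    pcl::transformPointCloud (in, out, t.matrix ());   // equivalent direct 4x4 call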
/** \brief Apply an affine transform defined by an Eigen Transform
const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
- bool copy_all_fields = true);
+ bool copy_all_fields = true)
+ {
+ return (transformPointCloud<PointT, Scalar> (cloud_in, indices, cloud_out, transform.matrix (), copy_all_fields));
+ }
template <typename PointT> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
{
- return (transformPointCloud<PointT, float> (cloud_in, indices, cloud_out, transform, copy_all_fields));
+ return (transformPointCloud<PointT, float> (cloud_in, indices, cloud_out, transform.matrix (), copy_all_fields));
}
/** \brief Apply an affine transform defined by an Eigen Transform
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
bool copy_all_fields = true)
{
- return (transformPointCloud<PointT, Scalar> (cloud_in, indices.indices, cloud_out, transform, copy_all_fields));
+ return (transformPointCloud<PointT, Scalar> (cloud_in, indices.indices, cloud_out, transform.matrix (), copy_all_fields));
}
template <typename PointT> void
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
{
- return (transformPointCloud<PointT, float> (cloud_in, indices, cloud_out, transform, copy_all_fields));
+ return (transformPointCloud<PointT, float> (cloud_in, indices.indices, cloud_out, transform.matrix (), copy_all_fields));
}
/** \brief Transform a point cloud and rotate its normals using an Eigen transform.
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
- bool copy_all_fields = true);
+ bool copy_all_fields = true)
+ {
+ return (transformPointCloudWithNormals<PointT, Scalar> (cloud_in, cloud_out, transform.matrix (), copy_all_fields));
+ }
template <typename PointT> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
{
- return (transformPointCloudWithNormals<PointT, float> (cloud_in, cloud_out, transform, copy_all_fields));
+ return (transformPointCloudWithNormals<PointT, float> (cloud_in, cloud_out, transform.matrix (), copy_all_fields));
}
/** \brief Transform a point cloud and rotate its normals using an Eigen transform.
const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
- bool copy_all_fields = true);
+ bool copy_all_fields = true)
+ {
+ return (transformPointCloudWithNormals<PointT, Scalar> (cloud_in, indices, cloud_out, transform.matrix (), copy_all_fields));
+ }
template <typename PointT> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
{
- return (transformPointCloudWithNormals<PointT, float> (cloud_in, indices, cloud_out, transform, copy_all_fields));
+ return (transformPointCloudWithNormals<PointT, float> (cloud_in, indices, cloud_out, transform.matrix (), copy_all_fields));
}
/** \brief Transform a point cloud and rotate its normals using an Eigen transform.
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
bool copy_all_fields = true)
{
- return (transformPointCloudWithNormals<PointT, Scalar> (cloud_in, indices.indices, cloud_out, transform, copy_all_fields));
+ return (transformPointCloudWithNormals<PointT, Scalar> (cloud_in, indices.indices, cloud_out, transform.matrix (), copy_all_fields));
}
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
{
- return (transformPointCloudWithNormals<PointT, float> (cloud_in, indices, cloud_out, transform, copy_all_fields));
+ return (transformPointCloudWithNormals<PointT, float> (cloud_in, indices.indices, cloud_out, transform.matrix (), copy_all_fields));
}
/** \brief Apply a rigid transform defined by a 4x4 matrix
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix<Scalar, 4, 4> &transform,
- bool copy_all_fields = true)
- {
- Eigen::Transform<Scalar, 3, Eigen::Affine> t (transform);
- return (transformPointCloud<PointT, Scalar> (cloud_in, cloud_out, t, copy_all_fields));
- }
+ bool copy_all_fields = true);
template <typename PointT> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix<Scalar, 4, 4> &transform,
- bool copy_all_fields = true)
- {
- Eigen::Transform<Scalar, 3, Eigen::Affine> t (transform);
- return (transformPointCloud<PointT, Scalar> (cloud_in, indices, cloud_out, t, copy_all_fields));
- }
+ bool copy_all_fields = true);
template <typename PointT> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix<Scalar, 4, 4> &transform,
- bool copy_all_fields = true)
- {
- Eigen::Transform<Scalar, 3, Eigen::Affine> t (transform);
- return (transformPointCloudWithNormals<PointT, Scalar> (cloud_in, cloud_out, t, copy_all_fields));
- }
+ bool copy_all_fields = true);
template <typename PointT> void
const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix<Scalar, 4, 4> &transform,
- bool copy_all_fields = true)
- {
- Eigen::Transform<Scalar, 3, Eigen::Affine> t (transform);
- return (transformPointCloudWithNormals<PointT, Scalar> (cloud_in, indices, cloud_out, t, copy_all_fields));
- }
+ bool copy_all_fields = true);
template <typename PointT> void
const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields = true)
{
- Eigen::Transform<Scalar, 3, Eigen::Affine> t (transform);
- return (transformPointCloudWithNormals<PointT, Scalar> (cloud_in, indices, cloud_out, t, copy_all_fields));
+ return (transformPointCloudWithNormals<PointT, Scalar> (cloud_in, indices.indices, cloud_out, transform, copy_all_fields));
}
const Eigen::Matrix4f &transform,
bool copy_all_fields = true)
{
- return (transformPointCloudWithNormals<PointT, float> (cloud_in, indices, cloud_out, transform, copy_all_fields));
+ return (transformPointCloudWithNormals<PointT, float> (cloud_in, indices.indices, cloud_out, transform, copy_all_fields));
}
/** \brief Apply a rigid transform defined by a 3D offset and a quaternion
#pragma once
+#include <Eigen/Core> // for Matrix
+
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/common/eigen.h>
namespace pcl
{
#pragma once
#include <cstdio>
-#include <cstdarg>
#include <pcl/pcl_exports.h>
#include <pcl/pcl_config.h>
+// Use these macros e.g. like this:
+// PCL_INFO_STREAM("Info: this is a point: " << pcl::PointXYZ(1.0, 2.0, 3.0) << std::endl);
+// PCL_ERROR_STREAM("Error: an Eigen vector: " << std::endl << Eigen::Vector3f(1.0, 2.0, 3.0) << std::endl);
+#define PCL_LOG_STREAM(LEVEL, STREAM, CSTR, ATTR, FG, ARGS) if(pcl::console::isVerbosityLevelEnabled(pcl::console::LEVEL)) { fflush(stdout); pcl::console::change_text_color(CSTR, pcl::console::ATTR, pcl::console::FG); STREAM << ARGS; pcl::console::reset_text_color(CSTR); }
+#define PCL_ALWAYS_STREAM(ARGS) PCL_LOG_STREAM(L_ALWAYS, std::cout, stdout, TT_RESET, TT_WHITE, ARGS)
+#define PCL_ERROR_STREAM(ARGS) PCL_LOG_STREAM(L_ERROR, std::cerr, stderr, TT_BRIGHT, TT_RED, ARGS)
+#define PCL_WARN_STREAM(ARGS) PCL_LOG_STREAM(L_WARN, std::cerr, stderr, TT_BRIGHT, TT_YELLOW, ARGS)
+#define PCL_INFO_STREAM(ARGS) PCL_LOG_STREAM(L_INFO, std::cout, stdout, TT_RESET, TT_WHITE, ARGS)
+#define PCL_DEBUG_STREAM(ARGS) PCL_LOG_STREAM(L_DEBUG, std::cout, stdout, TT_RESET, TT_GREEN, ARGS)
+#define PCL_VERBOSE_STREAM(ARGS) PCL_LOG_STREAM(L_VERBOSE, std::cout, stdout, TT_RESET, TT_WHITE, ARGS)
+
+
#define PCL_ALWAYS(...) pcl::console::print (pcl::console::L_ALWAYS, __VA_ARGS__)
#define PCL_ERROR(...) pcl::console::print (pcl::console::L_ERROR, __VA_ARGS__)
#define PCL_WARN(...) pcl::console::print (pcl::console::L_WARN, __VA_ARGS__)
#include <pcl/point_cloud.h>
#include <pcl/type_traits.h>
#include <pcl/for_each_type.h>
-#include <pcl/exceptions.h>
#include <pcl/console/print.h>
-#ifndef Q_MOC_RUN
+
#include <boost/foreach.hpp>
-#endif
namespace pcl
{
template<typename U> void operator() ()
{
pcl::PCLPointField f;
- f.name = traits::name<PointT, U>::value;
- f.offset = traits::offset<PointT, U>::value;
- f.datatype = traits::datatype<PointT, U>::value;
- f.count = traits::datatype<PointT, U>::size;
+ f.name = pcl::traits::name<PointT, U>::value;
+ f.offset = pcl::traits::offset<PointT, U>::value;
+ f.datatype = pcl::traits::datatype<PointT, U>::value;
+ f.count = pcl::traits::datatype<PointT, U>::size;
fields_.push_back (f);
}
{
FieldMapping mapping;
mapping.serialized_offset = field.offset;
- mapping.struct_offset = traits::offset<PointT, Tag>::value;
- mapping.size = sizeof (typename traits::datatype<PointT, Tag>::type);
+ mapping.struct_offset = pcl::traits::offset<PointT, Tag>::value;
+ mapping.size = sizeof (typename pcl::traits::datatype<PointT, Tag>::type);
map_.push_back (mapping);
return;
}
}
// Disable thrown exception per #595: http://dev.pointclouds.org/issues/595
- PCL_WARN ("Failed to find match for field '%s'.\n", traits::name<PointT, Tag>::value);
+ PCL_WARN ("Failed to find match for field '%s'.\n", pcl::traits::name<PointT, Tag>::value);
//throw pcl::InvalidConversionException (ss.str ());
}
// Copy point data
std::uint32_t num_points = msg.width * msg.height;
- cloud.points.resize (num_points);
+ cloud.resize (num_points);
std::uint8_t* cloud_data = reinterpret_cast<std::uint8_t*>(&cloud[0]);
// Check if we can copy adjacent points in a single memcpy. We can do so if there
else
{
// If not, memcpy each group of contiguous fields separately
- for (std::uint32_t row = 0; row < msg.height; ++row)
+ for (index_t row = 0; row < msg.height; ++row)
{
const std::uint8_t* row_data = &msg.data[row * msg.row_step];
- for (std::uint32_t col = 0; col < msg.width; ++col)
+ for (index_t col = 0; col < msg.width; ++col)
{
const std::uint8_t* msg_data = row_data + col * msg.point_step;
for (const detail::FieldMapping& mapping : field_map)
}
  // sensor_msgs::image_encodings::BGR8;
+ msg.header = cloud.header;
msg.encoding = "bgr8";
msg.step = msg.width * sizeof (std::uint8_t) * 3;
msg.data.resize (msg.step * msg.height);
int point_step = cloud.point_step;
// pcl::image_encodings::BGR8;
+ msg.header = cloud.header;
msg.encoding = "bgr8";
msg.step = static_cast<std::uint32_t>(msg.width * sizeof (std::uint8_t) * 3);
msg.data.resize (msg.step * msg.height);
#include <Eigen/StdVector>
#include <Eigen/Geometry>
#include <pcl/pcl_exports.h>
-#include <pcl/pcl_macros.h>
namespace pcl
{
#include <stdexcept>
#include <sstream>
-#include <pcl/pcl_macros.h>
#include <boost/current_function.hpp>
/** PCL_THROW_EXCEPTION a helper macro to be used for throwing exceptions.
#pragma GCC system_header
#endif
-#ifndef Q_MOC_RUN
#include <boost/mpl/is_sequence.hpp>
#include <boost/mpl/begin_end.hpp>
#include <boost/mpl/next_prior.hpp>
#include <boost/mpl/contains.hpp>
#include <boost/mpl/not.hpp>
#include <boost/mpl/aux_/unwrap.hpp>
-#endif
#include <type_traits>
{
if ((nb_rows > input_->height) || (row_start > input_->height))
{
- PCL_ERROR ("[PCLBase::setIndices] cloud is only %d height", input_->height);
+ PCL_ERROR ("[PCLBase::setIndices] cloud is only %d height\n", input_->height);
return;
}
if ((nb_cols > input_->width) || (col_start > input_->width))
{
- PCL_ERROR ("[PCLBase::setIndices] cloud is only %d width", input_->width);
+ PCL_ERROR ("[PCLBase::setIndices] cloud is only %d width\n", input_->width);
return;
}
std::size_t row_end = row_start + nb_rows;
if (row_end > input_->height)
{
- PCL_ERROR ("[PCLBase::setIndices] %d is out of rows range %d", row_end, input_->height);
+ PCL_ERROR ("[PCLBase::setIndices] %d is out of rows range %d\n", row_end, input_->height);
return;
}
std::size_t col_end = col_start + nb_cols;
if (col_end > input_->width)
{
- PCL_ERROR ("[PCLBase::setIndices] %d is out of columns range %d", col_end, input_->width);
+ PCL_ERROR ("[PCLBase::setIndices] %d is out of columns range %d\n", col_end, input_->width);
return;
}
{
// Check if input was set
if (!input_)
+ {
+ PCL_ERROR ("[initCompute] No input set.\n");
return (false);
+ }
// If no point indices have been given, construct a set of indices for the entire input point cloud
if (!indices_)
(pcl::PointXYZRGBA) \
(pcl::PointXYZRGB) \
(pcl::PointXYZRGBL) \
+ (pcl::PointXYZLAB) \
(pcl::PointXYZHSV) \
(pcl::PointXY) \
(pcl::InterestPoint) \
(pcl::PointXYZRGBA) \
(pcl::PointXYZRGB) \
(pcl::PointXYZRGBL) \
+ (pcl::PointXYZLAB) \
(pcl::PointXYZHSV) \
(pcl::InterestPoint) \
(pcl::PointNormal) \
(pcl::BRISKSignature512) \
(pcl::Narf36)
+// Define all point types that have descriptorSize() member function
+#define PCL_DESCRIPTOR_FEATURE_POINT_TYPES \
+ (pcl::PFHSignature125) \
+ (pcl::PFHRGBSignature250) \
+ (pcl::FPFHSignature33) \
+ (pcl::VFHSignature308) \
+ (pcl::GASDSignature512) \
+ (pcl::GASDSignature984) \
+ (pcl::GASDSignature7992) \
+ (pcl::GRSDSignature21) \
+ (pcl::ESFSignature640) \
+ (pcl::BRISKSignature512) \
+ (pcl::Narf36)
+
+
namespace pcl
{
+ namespace detail
+ {
+ namespace traits
+ {
+ template<typename FeaturePointT> struct descriptorSize {};
+
+ template<> struct descriptorSize<PFHSignature125> { static constexpr const int value = 125; };
+ template<> struct descriptorSize<PFHRGBSignature250> { static constexpr const int value = 250; };
+ template<> struct descriptorSize<ShapeContext1980> { static constexpr const int value = 1980; };
+ template<> struct descriptorSize<UniqueShapeContext1960> { static constexpr const int value = 1960; };
+ template<> struct descriptorSize<SHOT352> { static constexpr const int value = 352; };
+ template<> struct descriptorSize<SHOT1344> { static constexpr const int value = 1344; };
+ template<> struct descriptorSize<FPFHSignature33> { static constexpr const int value = 33; };
+ template<> struct descriptorSize<VFHSignature308> { static constexpr const int value = 308; };
+ template<> struct descriptorSize<GRSDSignature21> { static constexpr const int value = 21; };
+ template<> struct descriptorSize<BRISKSignature512> { static constexpr const int value = 512; };
+ template<> struct descriptorSize<ESFSignature640> { static constexpr const int value = 640; };
+ template<> struct descriptorSize<GASDSignature512> { static constexpr const int value = 512; };
+ template<> struct descriptorSize<GASDSignature984> { static constexpr const int value = 984; };
+ template<> struct descriptorSize<GASDSignature7992> { static constexpr const int value = 7992; };
+ template<> struct descriptorSize<GFPFHSignature16> { static constexpr const int value = 16; };
+ template<> struct descriptorSize<Narf36> { static constexpr const int value = 36; };
+ template<int N> struct descriptorSize<Histogram<N>> { static constexpr const int value = N; };
+
+
+ template<typename FeaturePointT>
+ static constexpr int descriptorSize_v = descriptorSize<FeaturePointT>::value;
+ }
+ }
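A compile-time sketch of the new trait (values match the specializations above):

    static_assert (pcl::detail::traits::descriptorSize_v<pcl::FPFHSignature33> == 33, "");
    static_assert (pcl::detail::traits::descriptorSize_v<pcl::Histogram<90>> == 90, "");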
using Array3fMap = Eigen::Map<Eigen::Array3f>;
using Array3fMapConst = const Eigen::Map<const Eigen::Array3f>;
rgba = p.rgba;
}
- inline PointXYZRGBA (): PointXYZRGBA (0, 0, 0, 0) {}
+ inline PointXYZRGBA (): PointXYZRGBA (0, 0, 0, 255) {}
inline PointXYZRGBA (std::uint8_t _r, std::uint8_t _g, std::uint8_t _b, std::uint8_t _a):
PointXYZRGBA (0.f, 0.f, 0.f, _r, _g, _b, _a) {}
inline PointXYZRGBA (float _x, float _y, float _z):
- PointXYZRGBA (_x, _y, _z, 0, 0, 0, 0) {}
+ PointXYZRGBA (_x, _y, _z, 0, 0, 0, 255) {}
inline PointXYZRGBA (float _x, float _y, float _z, std::uint8_t _r,
std::uint8_t _g, std::uint8_t _b, std::uint8_t _a)
};
+ struct EIGEN_ALIGN16 _PointXYZLAB
+ {
+ PCL_ADD_POINT4D; // this adds the members x,y,z
+ union
+ {
+ struct
+ {
+ float L;
+ float a;
+ float b;
+ };
+ float data_lab[4];
+ };
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+ };
+
+ PCL_EXPORTS std::ostream& operator << (std::ostream& os, const PointXYZLAB& p);
+ /** \brief A point structure representing Euclidean xyz coordinates, and the CIELAB color.
+ * \ingroup common
+ */
+ struct PointXYZLAB : public _PointXYZLAB
+ {
+ inline PointXYZLAB (const _PointXYZLAB &p)
+ {
+ x = p.x; y = p.y; z = p.z; data[3] = 1.0f;
+ L = p.L; a = p.a; b = p.b;
+ }
+
+ inline PointXYZLAB()
+ {
+ x = y = z = 0.0f;
+ data[3] = 1.0f; // important for homogeneous coordinates
+ L = a = b = 0.0f;
+ data_lab[3] = 0.0f;
+ }
+
+ friend std::ostream& operator << (std::ostream& os, const PointXYZLAB& p);
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+ };
+
+
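A short construction sketch for the new point type (values are illustrative):

    pcl::PointXYZLAB p;
    p.x = 1.f; p.y = 2.f; p.z = 3.f;      // Euclidean coordinates; data[3] stays 1.0f
    p.L = 52.f; p.a = 20.f; p.b = -34.f;  // CIELAB color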
struct EIGEN_ALIGN16 _PointXYZHSV
{
PCL_ADD_POINT4D; // This adds the members x,y,z which can also be accessed using the point (which is float[4])
inline PointWithViewpoint (): PointWithViewpoint (0.f, 0.f, 0.f) {}
- PCL_DEPRECATED(1, 12, "Use ctor accepting all position (x, y, z) data")
- inline PointWithViewpoint (float _x, float _y = 0.f):
- PointWithViewpoint (_x, _y, 0.f) {}
-
- PCL_DEPRECATED(1, 12, "Use ctor accepting all viewpoint (vp_x, vp_y, vp_z) data")
- inline PointWithViewpoint (float _x, float _y, float _z, float _vp_x, float _vp_y = 0.f):
- PointWithViewpoint (_x, _y, _z, _vp_x, _vp_y, 0.f) {}
-
inline PointWithViewpoint (float _x, float _y, float _z): PointWithViewpoint (_x, _y, _z, 0.f, 0.f, 0.f) {}
inline PointWithViewpoint (float _x, float _y, float _z, float _vp_x, float _vp_y, float _vp_z)
struct PFHSignature125
{
float histogram[125] = {0.f};
- static int descriptorSize () { return 125; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<PFHSignature125>; }
inline PFHSignature125 () = default;
friend std::ostream& operator << (std::ostream& os, const PFHSignature125& p);
};
+
PCL_EXPORTS std::ostream& operator << (std::ostream& os, const PFHRGBSignature250& p);
/** \brief A point structure representing the Point Feature Histogram with colors (PFHRGB).
* \ingroup common
struct PFHRGBSignature250
{
float histogram[250] = {0.f};
- static int descriptorSize () { return 250; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<PFHRGBSignature250>; }
inline PFHRGBSignature250 () = default;
{
float descriptor[1980] = {0.f};
float rf[9] = {0.f};
- static int descriptorSize () { return 1980; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<ShapeContext1980>; }
inline ShapeContext1980 () = default;
{
float descriptor[1960] = {0.f};
float rf[9] = {0.f};
- static int descriptorSize () { return 1960; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<UniqueShapeContext1960>; }
inline UniqueShapeContext1960 () = default;
{
float descriptor[352] = {0.f};
float rf[9] = {0.f};
- static int descriptorSize () { return 352; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<SHOT352>; }
inline SHOT352 () = default;
{
float descriptor[1344] = {0.f};
float rf[9] = {0.f};
- static int descriptorSize () { return 1344; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<SHOT1344>; }
inline SHOT1344 () = default;
struct FPFHSignature33
{
float histogram[33] = {0.f};
- static int descriptorSize () { return 33; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<FPFHSignature33>; }
inline FPFHSignature33 () = default;
struct VFHSignature308
{
float histogram[308] = {0.f};
- static int descriptorSize () { return 308; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<VFHSignature308>; }
inline VFHSignature308 () = default;
struct GRSDSignature21
{
float histogram[21] = {0.f};
- static int descriptorSize () { return 21; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<GRSDSignature21>; }
inline GRSDSignature21 () = default;
float scale = 0.f;
float orientation = 0.f;
unsigned char descriptor[64] = {0};
- static int descriptorSize () { return 64; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<BRISKSignature512>; }
inline BRISKSignature512 () = default;
struct ESFSignature640
{
float histogram[640] = {0.f};
- static int descriptorSize () { return 640; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<ESFSignature640>; }
inline ESFSignature640 () = default;
struct GASDSignature512
{
float histogram[512] = {0.f};
- static int descriptorSize() { return 512; }
+ static constexpr int descriptorSize() { return detail::traits::descriptorSize_v<GASDSignature512>; }
inline GASDSignature512 () = default;
struct GASDSignature984
{
float histogram[984] = {0.f};
- static int descriptorSize() { return 984; }
+ static constexpr int descriptorSize() { return detail::traits::descriptorSize_v<GASDSignature984>; }
inline GASDSignature984 () = default;
struct GASDSignature7992
{
float histogram[7992] = {0.f};
- static int descriptorSize() { return 7992; }
+ static constexpr int descriptorSize() { return detail::traits::descriptorSize_v<GASDSignature7992>; }
inline GASDSignature7992 () = default;
struct GFPFHSignature16
{
float histogram[16] = {0.f};
- static int descriptorSize () { return 16; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<GFPFHSignature16>; }
inline GFPFHSignature16 () = default;
{
float x = 0.f, y = 0.f, z = 0.f, roll = 0.f, pitch = 0.f, yaw = 0.f;
float descriptor[36] = {0.f};
- static int descriptorSize () { return 36; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<Narf36>; }
inline Narf36 () = default;
struct Histogram
{
float histogram[N];
- static int descriptorSize () { return N; }
+ static constexpr int descriptorSize () { return detail::traits::descriptorSize_v<Histogram<N>>; }
};
struct EIGEN_ALIGN16 _PointWithScale
operator << (std::ostream& os, const Histogram<N>& p)
{
// make constexpr
- if (N > 0)
+ PCL_IF_CONSTEXPR(N > 0)
{
os << "(" << p.histogram[0];
std::for_each(p.histogram + 1, std::end(p.histogram),
)
POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::PointXYZRGBL, pcl::_PointXYZRGBL)
+POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::_PointXYZLAB,
+ (float, x, x)
+ (float, y, y)
+ (float, z, z)
+ (float, L, L)
+ (float, a, a)
+ (float, b, b)
+)
+POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::PointXYZLAB, pcl::_PointXYZLAB)
+
POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::_PointXYZHSV,
(float, x, x)
(float, y, y)
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <Eigen/StdVector>
-#include <Eigen/Core>
-
// Point Cloud message includes. Needed everywhere.
#include <pcl/point_cloud.h>
#include <pcl/PointIndices.h>
bool fake_indices_;
/** \brief The size of each individual field. */
- std::vector<int> field_sizes_;
+ std::vector<uindex_t> field_sizes_;
/** \brief The x-y-z fields indices. */
index_t x_idx_, y_idx_, z_idx_;
#include <pcl/pcl_config.h>
+#include <boost/preprocessor/arithmetic/add.hpp>
#include <boost/preprocessor/comparison/equal.hpp>
#include <boost/preprocessor/comparison/less.hpp>
#include <boost/preprocessor/control/if.hpp>
/**
* \brief Tests for Minor < PCL_MINOR_VERSION
+ * \details When the PCL version is of the form `34.12.99`, this macro behaves as if it were
+ * already `34.13.0`, which allows for a smoother transition for maintainers
*/
#define _PCL_COMPAT_MINOR_VERSION(Minor, IfPass, IfFail) \
- BOOST_PP_IF(BOOST_PP_LESS(PCL_MINOR_VERSION, Minor), IfPass, IfFail)
+ BOOST_PP_IF(BOOST_PP_EQUAL(PCL_REVISION_VERSION, 99), \
+ BOOST_PP_IF(BOOST_PP_LESS(BOOST_PP_ADD(PCL_MINOR_VERSION, 1), Minor), IfPass, IfFail), \
+ BOOST_PP_IF(BOOST_PP_LESS(PCL_MINOR_VERSION, Minor), IfPass, IfFail))
/**
* \brief Tests for Major == PCL_MAJOR_VERSION
+ * \details When the PCL version is of the form `34.99.12`, this macro behaves as if it were
+ * already `35.0.0`, which allows for a smoother transition for maintainers
*/
#define _PCL_COMPAT_MAJOR_VERSION(Major, IfPass, IfFail) \
- BOOST_PP_IF(BOOST_PP_EQUAL(PCL_MAJOR_VERSION, Major), IfPass, IfFail)
+ BOOST_PP_IF(BOOST_PP_EQUAL(PCL_MINOR_VERSION, 99), \
+ BOOST_PP_IF(BOOST_PP_EQUAL(BOOST_PP_ADD(PCL_MAJOR_VERSION, 1), Major), IfPass, IfFail), \
+ BOOST_PP_IF(BOOST_PP_EQUAL(PCL_MAJOR_VERSION, Major), IfPass, IfFail))
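A worked expansion of the pre-release handling (the keep/remove tokens are placeholders, not real code):

    // PCL 1.11.1:  _PCL_COMPAT_MINOR_VERSION(12, keep, remove) -> keep    (11 < 12)
    // PCL 1.11.99: _PCL_COMPAT_MINOR_VERSION(12, keep, remove) -> remove  (treated as 1.12.0: 11 + 1 is not < 12)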
/**
* \brief macro for compatibility across compilers and help remove old deprecated
#endif
#endif // defined _WIN32
-
-template<typename T>
-PCL_DEPRECATED(1, 12, "use std::isnan instead of pcl_isnan")
-bool pcl_isnan (T&& x) { return std::isnan (std::forward<T> (x)); }
-
-template<typename T>
-PCL_DEPRECATED(1, 12, "use std::isfinite instead of pcl_isfinite")
-bool pcl_isfinite (T&& x) { return std::isfinite (std::forward<T> (x)); }
-
-template<typename T>
-PCL_DEPRECATED(1, 12, "use std::isinf instead of pcl_isinf")
-bool pcl_isinf (T&& x) { return std::isinf (std::forward<T> (x)); }
-
-
#ifndef DEG2RAD
#define DEG2RAD(x) ((x)*0.017453293)
#endif
#endif
#endif
+namespace pcl {
+
inline void*
-aligned_malloc (std::size_t size)
+aligned_malloc(std::size_t size)
{
- void *ptr;
-#if defined (MALLOC_ALIGNED)
- ptr = std::malloc (size);
-#elif defined (HAVE_POSIX_MEMALIGN)
- if (posix_memalign (&ptr, 16, size))
+ void* ptr;
+#if defined(MALLOC_ALIGNED)
+ ptr = std::malloc(size);
+#elif defined(HAVE_POSIX_MEMALIGN)
+ if (posix_memalign(&ptr, 16, size))
ptr = 0;
-#elif defined (HAVE_MM_MALLOC)
- ptr = _mm_malloc (size, 16);
-#elif defined (_MSC_VER)
- ptr = _aligned_malloc (size, 16);
-#elif defined (ANDROID)
- ptr = memalign (16, size);
+#elif defined(HAVE_MM_MALLOC)
+ ptr = _mm_malloc(size, 16);
+#elif defined(_MSC_VER)
+ ptr = _aligned_malloc(size, 16);
+#elif defined(ANDROID)
+ ptr = memalign(16, size);
#else
- #error aligned_malloc not supported on your platform
+#error aligned_malloc not supported on your platform
ptr = 0;
#endif
return (ptr);
}
inline void
-aligned_free (void* ptr)
+aligned_free(void* ptr)
{
-#if defined (MALLOC_ALIGNED) || defined (HAVE_POSIX_MEMALIGN)
- std::free (ptr);
-#elif defined (HAVE_MM_MALLOC)
- _mm_free (ptr);
-#elif defined (_MSC_VER)
- _aligned_free (ptr);
-#elif defined (ANDROID)
- free (ptr);
+#if defined(MALLOC_ALIGNED) || defined(HAVE_POSIX_MEMALIGN)
+ std::free(ptr);
+#elif defined(HAVE_MM_MALLOC)
+ _mm_free(ptr);
+#elif defined(_MSC_VER)
+ _aligned_free(ptr);
+#elif defined(ANDROID)
+ free(ptr);
#else
- #error aligned_free not supported on your platform
+#error aligned_free not supported on your platform
#endif
}
+} // namespace pcl
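With the helpers now inside namespace pcl, a minimal allocation sketch looks like this:

    void* buf = pcl::aligned_malloc (64 * sizeof (float));   // 16-byte aligned on every supported platform
    // ... use buf ...
    pcl::aligned_free (buf);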
+
/**
* \brief Macro to add a no-op or a fallthrough attribute based on compiler feature
*
#else
#define PCL_NODISCARD
#endif
+
+#ifdef __cpp_if_constexpr
+ #define PCL_IF_CONSTEXPR(x) if constexpr(x)
+#else
+ #define PCL_IF_CONSTEXPR(x) if (x)
+#endif
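A minimal sketch of the new macro (sumOrZero is a hypothetical helper; with C++17 the branch is evaluated at compile time, otherwise it falls back to a plain runtime if):

    #include <array>

    template <std::size_t N>
    float sumOrZero (const std::array<float, N>& values)
    {
      float sum = 0.f;
      PCL_IF_CONSTEXPR (N > 0)
      {
        for (const float v : values)
          sum += v;
      }
      return sum;
    }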
#include <pcl/pcl_macros.h>
#include <pcl/type_traits.h>
#include <pcl/types.h>
+#include <pcl/console/print.h> // for PCL_WARN
-#include <algorithm>
#include <utility>
#include <vector>
PCL_MAKE_ALIGNED_OPERATOR_NEW
};
- namespace detail
- {
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "use createMapping() instead")
- shared_ptr<pcl::MsgFieldMap>&
- getMapping (pcl::PointCloud<PointT>& p);
- } // namespace detail
-
/** \brief PointCloud represents the base class in PCL for storing collections of 3D points.
*
* The class is templated, which means you need to specify the type of data
*/
PointCloud () = default;
- /** \brief Copy constructor.
- * \param[in] pc the cloud to copy into this
- * \todo Erase once mapping_ is removed.
- */
- // Ignore unknown pragma warning on MSVC (4996)
- #pragma warning(push)
- #pragma warning(disable: 4068)
- // Ignore deprecated warning on clang compilers
- #pragma clang diagnostic push
- #pragma clang diagnostic ignored "-Wdeprecated-declarations"
- PointCloud (const PointCloud<PointT> &pc) = default;
- #pragma clang diagnostic pop
- #pragma warning(pop)
-
/** \brief Copy constructor from point cloud subset
* \param[in] pc the cloud to copy into this
* \param[in] indices the subset to copy
// libstdc++ (GCC) on calling reserve allocates new memory, copies and deallocates old vector
// This causes a drastic performance hit. Prefer not to use reserve with libstdc++ (default on clang)
- cloud1.points.insert (cloud1.points.end (), cloud2.points.begin (), cloud2.points.end ());
+ cloud1.insert (cloud1.end (), cloud2.begin (), cloud2.end ());
cloud1.width = cloud1.size ();
cloud1.height = 1;
/** \brief Return an Eigen MatrixXf (assumes float values) mapped to the specified dimensions of the PointCloud.
* \note This method is for advanced users only! Use with care!
*
- * \attention Since 1.4.0, Eigen matrices are forced to Row Major to increase the efficiency of the algorithms in PCL
- * This means that the behavior of getMatrixXfMap changed, and is now correctly mapping 1-1 with a PointCloud structure,
- * that is: number of points in a cloud = rows in a matrix, number of point dimensions = columns in a matrix
+ * \attention Compile time flags used for Eigen might affect the dimension of the Eigen::Map returned. If Eigen
+ * is using row major storage, the matrix shape would be (number of Points X elements in a Point) else
+ * the matrix shape would be (elements in a Point X number of Points). Essentially,
+ * * Major direction: number of points in cloud
+ * * Minor direction: number of point dimensions
+ * By default, as of Eigen 3.3, Eigen uses Column major storage
*
* \param[in] dim the number of dimensions to consider for each point
* \param[in] stride the number of values in each point (will be the number of values that separate two of the columns)
/** \brief Return an Eigen MatrixXf (assumes float values) mapped to the specified dimensions of the PointCloud.
* \note This method is for advanced users only! Use with care!
*
- * \attention Since 1.4.0, Eigen matrices are forced to Row Major to increase the efficiency of the algorithms in PCL
- * This means that the behavior of getMatrixXfMap changed, and is now correctly mapping 1-1 with a PointCloud structure,
- * that is: number of points in a cloud = rows in a matrix, number of point dimensions = columns in a matrix
+ * \attention Compile time flags used for Eigen might affect the dimension of the Eigen::Map returned. If Eigen
+ * is using row major storage, the matrix shape would be (number of Points X elements in a Point) else
+ * the matrix shape would be (elements in a Point X number of Points). Essentially,
+ * * Major direction: number of points in cloud
+ * * Minor direction: number of point dimensions
+ * By default, as of Eigen 3.3, Eigen uses Column major storage
*
* \param[in] dim the number of dimensions to consider for each point
* \param[in] stride the number of values in each point (will be the number of values that separate two of the columns)
//capacity
inline std::size_t size () const { return points.size (); }
- index_t max_size() const noexcept { return static_cast<index_t>(points.max_size()); }
+ inline index_t max_size() const noexcept { return static_cast<index_t>(points.max_size()); }
inline void reserve (std::size_t n) { points.reserve (n); }
inline bool empty () const { return points.empty (); }
- PointT* data() noexcept { return points.data(); }
- const PointT* data() const noexcept { return points.data(); }
+ inline PointT* data() noexcept { return points.data(); }
+ inline const PointT* data() const noexcept { return points.data(); }
/**
* \brief Resizes the container to contain `count` elements
}
}
+ /**
+ * \brief Resizes the container to contain `new_width * new_height` elements
+ * \details
+ * * If the current size is greater than the requested size, the pointcloud
+ * is reduced to its first requested elements
+   * * If the current size is less than the requested size, additional
+ * default-inserted points are appended
+ * \param[in] new_width new width of the point cloud
+ * \param[in] new_height new height of the point cloud
+ */
+ inline void
+ resize(uindex_t new_width, uindex_t new_height)
+ {
+ points.resize(new_width * new_height);
+ width = new_width;
+ height = new_height;
+ }
+
/**
* \brief Resizes the container to contain count elements
* \details
* \param[in] count new size of the point cloud
* \param[in] value the value to initialize the new points with
*/
- void
+ inline void
resize(index_t count, const PointT& value)
{
points.resize(count, value);
}
}
+ /**
+   * \brief Resizes the container to contain `new_width * new_height` elements
+ * \details
+   * * If the current size is greater than the requested size, the pointcloud
+ * is reduced to its first requested elements
+   * * If the current size is less than the requested size, additional
+ * default-inserted points are appended
+ * \param[in] new_width new width of the point cloud
+ * \param[in] new_height new height of the point cloud
+ * \param[in] value the value to initialize the new points with
+ */
+ inline void
+ resize(index_t new_width, index_t new_height, const PointT& value)
+ {
+ points.resize(new_width * new_height, value);
+ width = new_width;
+ height = new_height;
+ }
+
//element access
inline const PointT& operator[] (std::size_t n) const { return (points[n]); }
inline PointT& operator[] (std::size_t n) { return (points[n]); }
* \brief Replaces the points with `count` copies of `value`
* \note This breaks the organized structure of the cloud by setting the height to
* 1!
+ * \param[in] count new size of the point cloud
+ * \param[in] value value each point of the cloud should have
*/
- void
+ inline void
assign(index_t count, const PointT& value)
{
points.assign(count, value);
height = 1;
}
+ /**
+ * \brief Replaces the points with `new_width * new_height` copies of `value`
+ * \param[in] new_width new width of the point cloud
+ * \param[in] new_height new height of the point cloud
+ * \param[in] value value each point of the cloud should have
+ */
+ inline void
+ assign(index_t new_width, index_t new_height, const PointT& value)
+ {
+ points.assign(new_width * new_height, value);
+ width = new_width;
+ height = new_height;
+ }
+
/**
* \brief Replaces the points with copies of those in the range `[first, last)`
* \details The behavior is undefined if either argument is an iterator into
* \note This breaks the organized structure of the cloud by setting the height to
* 1!
*/
- template <class InputIt>
- void
- assign(InputIt first, InputIt last)
+ template <class InputIterator>
+ inline void
+ assign(InputIterator first, InputIterator last)
{
points.assign(std::move(first), std::move(last));
width = static_cast<std::uint32_t>(size());
height = 1;
}
+ /**
+ * \brief Replaces the points with copies of those in the range `[first, last)`
+ * \details The behavior is undefined if either argument is an iterator into
+ * `*this`
+ * \note This calculates the height based on size and width provided. This means
+ * the assignment happens even if the size is not perfectly divisible by width
+ * \param[in] first, last the range from which the points are copied
+ * \param[in] new_width new width of the point cloud
+ */
+ template <class InputIterator>
+ inline void
+ assign(InputIterator first, InputIterator last, index_t new_width)
+ {
+ points.assign(std::move(first), std::move(last));
+ width = new_width;
+ height = size() / width;
+ if (width * height != size()) {
+ PCL_WARN("Mismatch in assignment. Requested width (%zu) doesn't divide "
+ "provided size (%zu) cleanly. Setting height to 1\n",
+ static_cast<std::size_t>(width),
+ static_cast<std::size_t>(size()));
+ width = size();
+ height = 1;
+ }
+ }
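+  // Worked example (illustrative, not part of the API): assigning 8 points with new_width = 4
+  // yields width = 4, height = 2; with new_width = 3 the size is not evenly divisible, so the
+  // cloud falls back to width = 8, height = 1 and the warning above is printed.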
+
/**
* \brief Replaces the points with the elements from the initializer list `ilist`
* \note This breaks the organized structure of the cloud by setting the height to
* 1!
*/
void
- assign(std::initializer_list<PointT> ilist)
+ inline assign(std::initializer_list<PointT> ilist)
{
points.assign(std::move(ilist));
width = static_cast<std::uint32_t>(size());
height = 1;
}
+ /**
+ * \brief Replaces the points with the elements from the initializer list `ilist`
+ * \note This calculates the height based on size and width provided. This means
+ * the assignment happens even if the size is not perfectly divisible by width
+ * \param[in] ilist initializer list from which the points are copied
+ * \param[in] new_width new width of the point cloud
+ */
+ void
+ inline assign(std::initializer_list<PointT> ilist, index_t new_width)
+ {
+ points.assign(std::move(ilist));
+ width = new_width;
+ height = size() / width;
+ if (width * height != size()) {
+ PCL_WARN("Mismatch in assignment. Requested width (%zu) doesn't divide "
+ "provided size (%zu) cleanly. Setting height to 1\n",
+ static_cast<std::size_t>(width),
+ static_cast<std::size_t>(size()));
+ width = size();
+ height = 1;
+ }
+ }
+
/** \brief Insert a new point in the cloud, at the end of the container.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] pt the point to insert
height = 1;
}
+ /** \brief Insert a new point in the cloud, at the end of the container.
+ * \note This assumes the user would correct the width and height later on!
+ * \param[in] pt the point to insert
+ */
+ inline void
+ transient_push_back (const PointT& pt)
+ {
+ points.push_back (pt);
+ }
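+      // Typical use (illustrative): call transient_push_back in a loop to avoid updating
+      // width/height on every insertion, then set width = size () and height = 1 (or the
+      // proper organized dimensions) once after the loop.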
+
/** \brief Emplace a new point in the cloud, at the end of the container.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] args the parameters to forward to the point to construct
return points.back();
}
+ /** \brief Emplace a new point in the cloud, at the end of the container.
+ * \note This assumes the user would correct the width and height later on!
+ * \param[in] args the parameters to forward to the point to construct
+ * \return reference to the emplaced point
+ */
+ template <class... Args> inline reference
+ transient_emplace_back (Args&& ...args)
+ {
+ points.emplace_back (std::forward<Args> (args)...);
+ return points.back();
+ }
+
/** \brief Insert a new point in the cloud, given an iterator.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] position where to insert the point
inline iterator
insert (iterator position, const PointT& pt)
{
- iterator it = points.insert (position, pt);
+ iterator it = points.insert (std::move(position), pt);
width = size ();
height = 1;
return (it);
}
+ /** \brief Insert a new point in the cloud, given an iterator.
+ * \note This assumes the user would correct the width and height later on!
+ * \param[in] position where to insert the point
+ * \param[in] pt the point to insert
+ * \return returns the new position iterator
+ */
+ inline iterator
+ transient_insert (iterator position, const PointT& pt)
+ {
+ iterator it = points.insert (std::move(position), pt);
+ return (it);
+ }
+
/** \brief Insert a new point in the cloud N times, given an iterator.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] position where to insert the point
inline void
insert (iterator position, std::size_t n, const PointT& pt)
{
- points.insert (position, n, pt);
+ points.insert (std::move(position), n, pt);
width = size ();
height = 1;
}
+ /** \brief Insert a new point in the cloud N times, given an iterator.
+ * \note This assumes the user would correct the width and height later on!
+ * \param[in] position where to insert the point
+ * \param[in] n the number of times to insert the point
+ * \param[in] pt the point to insert
+ */
+ inline void
+ transient_insert (iterator position, std::size_t n, const PointT& pt)
+ {
+ points.insert (std::move(position), n, pt);
+ }
+
/** \brief Insert a new range of points in the cloud, at a certain position.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] position where to insert the data
template <class InputIterator> inline void
insert (iterator position, InputIterator first, InputIterator last)
{
- points.insert (position, first, last);
+ points.insert (std::move(position), std::move(first), std::move(last));
width = size ();
height = 1;
}
+ /** \brief Insert a new range of points in the cloud, at a certain position.
+ * \note This assumes the user would correct the width and height later on!
+ * \param[in] position where to insert the data
+ * \param[in] first where to start inserting the points from
+ * \param[in] last where to stop inserting the points from
+ */
+ template <class InputIterator> inline void
+ transient_insert (iterator position, InputIterator first, InputIterator last)
+ {
+ points.insert (std::move(position), std::move(first), std::move(last));
+ }
+
/** \brief Emplace a new point in the cloud, given an iterator.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] position iterator before which the point will be emplaced
template <class... Args> inline iterator
emplace (iterator position, Args&& ...args)
{
- iterator it = points.emplace (position, std::forward<Args> (args)...);
+ iterator it = points.emplace (std::move(position), std::forward<Args> (args)...);
width = size ();
height = 1;
return (it);
}
+ /** \brief Emplace a new point in the cloud, given an iterator.
+ * \note This assumes the user would correct the width and height later on!
+ * \param[in] position iterator before which the point will be emplaced
+ * \param[in] args the parameters to forward to the point to construct
+ * \return returns the new position iterator
+ */
+ template <class... Args> inline iterator
+ transient_emplace (iterator position, Args&& ...args)
+ {
+ iterator it = points.emplace (std::move(position), std::forward<Args> (args)...);
+ return (it);
+ }
+
/** \brief Erase a point in the cloud.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] position what data point to erase
inline iterator
erase (iterator position)
{
- iterator it = points.erase (position);
+ iterator it = points.erase (std::move(position));
width = size ();
height = 1;
return (it);
}
+ /** \brief Erase a point in the cloud.
+ * \note This assumes the user would correct the width and height later on!
+ * \param[in] position what data point to erase
+ * \return returns the new position iterator
+ */
+ inline iterator
+ transient_erase (iterator position)
+ {
+ iterator it = points.erase (std::move(position));
+ return (it);
+ }
+
/** \brief Erase a set of points given by a (first, last) iterator pair
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] first where to start erasing points from
inline iterator
erase (iterator first, iterator last)
{
- iterator it = points.erase (first, last);
+ iterator it = points.erase (std::move(first), std::move(last));
width = size ();
height = 1;
return (it);
}
+ /** \brief Erase a set of points given by a (first, last) iterator pair
+ * \note This assumes the user would correct the width and height later on!
+ * \param[in] first where to start erasing points from
+ * \param[in] last where to stop erasing points from
+ * \return returns the new position iterator
+ */
+ inline iterator
+ transient_erase (iterator first, iterator last)
+ {
+ iterator it = points.erase (std::move(first), std::move(last));
+ return (it);
+ }
+
/** \brief Swap a point cloud with another cloud.
* \param[in,out] rhs point cloud to swap this with
*/
inline Ptr
makeShared () const { return Ptr (new PointCloud<PointT> (*this)); }
- protected:
- /** \brief This is motivated by ROS integration. Users should not need to access mapping_.
- * \todo Once mapping_ is removed, erase the explicitly defined copy constructor in PointCloud.
- */
- PCL_DEPRECATED(1, 12, "rewrite your code to avoid using this protected field") shared_ptr<MsgFieldMap> mapping_;
-
- friend shared_ptr<MsgFieldMap>& detail::getMapping<PointT>(pcl::PointCloud<PointT> &p);
-
- public:
PCL_MAKE_ALIGNED_OPERATOR_NEW
};
- namespace detail
- {
- template <typename PointT> shared_ptr<pcl::MsgFieldMap>&
- getMapping (pcl::PointCloud<PointT>& p)
- {
- return (p.mapping_);
- }
- } // namespace detail
template <typename PointT> std::ostream&
operator << (std::ostream& s, const pcl::PointCloud<PointT> &p)
#pragma once
-// https://bugreports.qt-project.org/browse/QTBUG-22829
-#ifndef Q_MOC_RUN
#include <boost/mpl/assert.hpp> // for BOOST_MPL_ASSERT_MSG
#include <boost/mpl/identity.hpp> // for boost::mpl::identity
#include <boost/preprocessor/seq/enum.hpp> // for BOOST_PP_SEQ_ENUM
#include <boost/preprocessor/tuple/elem.hpp> // for BOOST_PP_TUPLE_ELEM
#include <boost/preprocessor/stringize.hpp> // for BOOST_PP_STRINGIZE
-#endif
// This is required for the workaround at line 84
#ifdef _MSC_VER
*/
struct PointXYZRGBL;
+ /** \brief Members: float x, y, z, L, a, b
+ * \ingroup common
+ */
+ struct PointXYZLAB;
+
/** \brief Members: float x, y, z, h, s, v
* \ingroup common
*/
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
+#include <pcl/common/colors.h> // for RGB2sRGB_LUT
+
namespace pcl
{
// r,g,b, i values are from 0 to 255
if (out.h < 0.f) out.h += 360.f;
}
+ /** \brief Convert a XYZRGB-based point type to a XYZLAB
+ * \param[in] in the input XYZRGB(XYZRGBA, XYZRGBL, etc.) point
+ * \param[out] out the output XYZLAB point
+ */
+ template <typename PointT, traits::HasColor<PointT> = true>
+ inline void
+ PointXYZRGBtoXYZLAB (const PointT& in,
+ PointXYZLAB& out)
+ {
+ out.x = in.x;
+ out.y = in.y;
+ out.z = in.z;
+ out.data[3] = 1.0; // important for homogeneous coordinates
+
+ // convert sRGB to CIELAB
+ // for sRGB -> CIEXYZ see http://www.easyrgb.com/index.php?X=MATH&H=02#text2
+ // for CIEXYZ -> CIELAB see http://www.easyrgb.com/index.php?X=MATH&H=07#text7
+ // an overview at: https://www.comp.nus.edu.sg/~leowwk/papers/colordiff.pdf
+
+ const auto& sRGB_LUT = RGB2sRGB_LUT<double, 8>();
+
+ const double R = sRGB_LUT[in.r];
+ const double G = sRGB_LUT[in.g];
+ const double B = sRGB_LUT[in.b];
+
+ // linear sRGB -> CIEXYZ, D65 illuminant, observer at 2 degrees
+ const double X = R * 0.4124 + G * 0.3576 + B * 0.1805;
+ const double Y = R * 0.2126 + G * 0.7152 + B * 0.0722;
+ const double Z = R * 0.0193 + G * 0.1192 + B * 0.9505;
+
+ // normalize X, Y, Z with tristimulus values for Xn, Yn, Zn
+ float f[3] = {static_cast<float>(X), static_cast<float>(Y), static_cast<float>(Z)};
+ f[0] /= 0.95047;
+ f[1] /= 1;
+ f[2] /= 1.08883;
+
+ // CIEXYZ -> CIELAB
+ for (int i = 0; i < 3; ++i) {
+ if (f[i] > 0.008856) {
+ f[i] = std::pow(f[i], 1.0 / 3.0);
+ }
+ else {
+ f[i] = 7.787 * f[i] + 16.0 / 116.0;
+ }
+ }
+
+ out.L = 116.0f * f[1] - 16.0f;
+ out.a = 500.0f * (f[0] - f[1]);
+ out.b = 200.0f * (f[1] - f[2]);
+ }
+
/** \brief Convert a XYZRGBA point type to a XYZHSV
* \param[in] in the input XYZRGBA point
* \param[out] out the output XYZHSV point
{
Intensity p;
PointRGBtoI (point, p);
- out.points.push_back (p);
+ out.push_back (p);
}
}
{
Intensity8u p;
PointRGBtoI (point, p);
- out.points.push_back (p);
+ out.push_back (p);
}
}
{
Intensity32u p;
PointRGBtoI (point, p);
- out.points.push_back (p);
+ out.push_back (p);
}
}
{
PointXYZHSV p;
PointXYZRGBtoXYZHSV (point, p);
- out.points.push_back (p);
+ out.push_back (p);
}
}
{
PointXYZHSV p;
PointXYZRGBAtoXYZHSV (point, p);
- out.points.push_back (p);
+ out.push_back (p);
}
}
{
PointXYZI p;
PointXYZRGBtoXYZI (point, p);
- out.points.push_back (p);
+ out.push_back (p);
}
}
pt.g = image.at (u, v).g;
pt.b = image.at (u, v).b;
- out.points.push_back (pt);
+ out.push_back (pt);
}
}
out.width = width_;
#pragma once
-#include <cmath>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/pcl_macros.h>
#include <pcl/common/distances.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
-
+#include <pcl/common/vector_average.h> // for VectorAverage3f
namespace pcl
{
#include <pcl/point_cloud.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
-#include <pcl/PCLPointCloud2.h> // for PCLPointCloud2
#include <pcl/common/angles.h> // for deg2rad
-#include <pcl/common/vector_average.h>
-#include <typeinfo>
+namespace pcl { struct PCLPointCloud2; }
namespace pcl
{
#pragma warning (disable: 4244)
#endif
-//https://bugreports.qt-project.org/browse/QTBUG-22829
-#ifndef Q_MOC_RUN
#include <pcl/point_struct_traits.h> // for pcl::traits::POD, POINT_CLOUD_REGISTER_FIELD_(NAME, OFFSET, DATATYPE), POINT_CLOUD_REGISTER_POINT_FIELD_LIST
#include <boost/mpl/assert.hpp> // for BOOST_MPL_ASSERT_MSG
#include <boost/preprocessor/seq/for_each.hpp> // for BOOST_PP_SEQ_FOR_EACH
#include <boost/preprocessor/seq/transform.hpp> // for BOOST_PP_SEQ_TRANSFORM
#include <boost/preprocessor/tuple/elem.hpp> // for BOOST_PP_TUPLE_ELEM
#include <boost/preprocessor/cat.hpp> // for BOOST_PP_CAT
-#endif
#include <cstdint> // for std::uint32_t
#include <type_traits> // for std::enable_if_t, std::is_array, std::remove_const_t, std::remove_all_extents_t
#include <cstdint>
+#include <Eigen/Core>
+
namespace pcl
{
- using uint8_t PCL_DEPRECATED(1, 12, "use std::uint8_t instead of pcl::uint8_t") = std::uint8_t;
- using int8_t PCL_DEPRECATED(1, 12, "use std::int8_t instead of pcl::int8_t") = std::int8_t;
- using uint16_t PCL_DEPRECATED(1, 12, "use std::uint16_t instead of pcl::uint16_t") = std::uint16_t;
- using int16_t PCL_DEPRECATED(1, 12, "use std::uint16_t instead of pcl::int16_t") = std::int16_t;
- using uint32_t PCL_DEPRECATED(1, 12, "use std::uint32_t instead of pcl::uint32_t") = std::uint32_t;
- using int32_t PCL_DEPRECATED(1, 12, "use std::int32_t instead of pcl::int32_t") = std::int32_t;
- using uint64_t PCL_DEPRECATED(1, 12, "use std::uint64_t instead of pcl::uint64_t") = std::uint64_t;
- using int64_t PCL_DEPRECATED(1, 12, "use std::int64_t instead of pcl::int64_t") = std::int64_t;
- using int_fast16_t PCL_DEPRECATED(1, 12, "use std::int_fast16_t instead of pcl::int_fast16_t") = std::int_fast16_t;
-
namespace detail {
/**
* \brief int_type::type refers to an integral type that satisfies template parameters
* \brief Type used for indices in PCL
*/
using Indices = IndicesAllocator<>;
+
+ /**
+ * \brief Type used for aligned vector of Eigen objects in PCL
+ */
+ template <typename T>
+ using AlignedVector = std::vector<T, Eigen::aligned_allocator<T>>;
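+  // e.g. pcl::AlignedVector<Eigen::Vector4f> keeps fixed-size, vectorizable Eigen types
+  // correctly aligned when stored in a std::vector.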
} // namespace pcl
}
const auto data1_size = cloud1.data.size ();
cloud1.data.resize(data1_size + cloud2.data.size ());
- for (std::size_t cp = 0; cp < size2; ++cp)
+ for (uindex_t cp = 0; cp < size2; ++cp)
{
for (const auto& field_data: valid_fields)
{
* \author: Qinghua Li (qinghua__li@163.com)
*/
-#include <pcl/common/eigen.h>
#include <pcl/range_image/bearing_angle_image.h>
namespace pcl
#include <pcl/point_types.h>
#include <pcl/common/colors.h>
+#include <array>
+
/// Glasbey lookup table
-static const unsigned char GLASBEY_LUT[] =
+static constexpr std::array<unsigned char, 256 * 3> GLASBEY_LUT =
{
77 , 175, 74 ,
228, 26 , 28 ,
153, 61 , 225,
237, 87 , 255,
87 , 24 , 206,
- 117, 143, 207,
+ 117, 143, 207
};
/// Viridis lookup table
-static const unsigned char VIRIDIS_LUT[] =
+static constexpr std::array<unsigned char, 256 * 3> VIRIDIS_LUT =
{
68 , 1 , 84 ,
68 , 2 , 85 ,
247, 230, 31 ,
249, 231, 33 ,
251, 231, 35 ,
- 254, 231, 36 ,
+ 254, 231, 36
};
/// Number of colors in Glasbey lookup table
-static const std::size_t GLASBEY_LUT_SIZE = sizeof (GLASBEY_LUT) / (sizeof (GLASBEY_LUT[0]) * 3);
+static constexpr std::size_t GLASBEY_LUT_SIZE = GLASBEY_LUT.size() / 3;
/// Number of colors in Viridis lookup table
-static const std::size_t VIRIDIS_LUT_SIZE = sizeof (VIRIDIS_LUT) / (sizeof (VIRIDIS_LUT[0]) * 3);
+static constexpr std::size_t VIRIDIS_LUT_SIZE = VIRIDIS_LUT.size() / 3;
-static const unsigned char* LUTS[] = { GLASBEY_LUT, VIRIDIS_LUT };
-static const std::size_t LUT_SIZES[] = { GLASBEY_LUT_SIZE, VIRIDIS_LUT_SIZE };
+static constexpr std::array<const std::array<unsigned char, 256 * 3>*, 2> LUTS = { &GLASBEY_LUT, &VIRIDIS_LUT };
+static constexpr std::array<std::size_t, 2> LUT_SIZES = { GLASBEY_LUT_SIZE, VIRIDIS_LUT_SIZE };
template<typename pcl::ColorLUTName T> pcl::RGB
pcl::ColorLUT<T>::at (std::size_t color_id)
{
assert (color_id < LUT_SIZES[T]);
pcl::RGB color;
- color.r = LUTS[T][color_id * 3 + 0];
- color.g = LUTS[T][color_id * 3 + 1];
- color.b = LUTS[T][color_id * 3 + 2];
- return (color);
+ color.r = (*LUTS[T])[color_id * 3 + 0];
+ color.g = (*LUTS[T])[color_id * 3 + 1];
+ color.b = (*LUTS[T])[color_id * 3 + 2];
+ return color;
}
template<typename pcl::ColorLUTName T> std::size_t
template<typename pcl::ColorLUTName T> const unsigned char*
pcl::ColorLUT<T>::data ()
{
- return LUTS[T];
+ return LUTS[T]->data();
}
pcl::RGB
#include <limits>
+#include <pcl/PCLPointCloud2.h> // for PCLPointCloud2
#include <pcl/common/common.h>
#include <pcl/console/print.h>
}
const auto field_idx = std::distance(cloud.fields.begin (), result);
- for (unsigned int i = 0; i < cloud.fields[field_idx].count; ++i)
+ for (uindex_t i = 0; i < cloud.fields[field_idx].count; ++i)
{
float data;
// TODO: replace float with the real data type
{
output.width = input.width;
output.height = input.height;
- output.points.resize (input.height * input.width);
+ output.resize (input.height * input.width);
}
unaliased_input = &input;
}
{
output.width = input.width;
output.height = input.height;
- output.points.resize (input.height * input.width);
+ output.resize (input.height * input.width);
}
unaliased_input = &input;
}
*
*/
-#include <pcl/point_types.h>
#include <pcl/common/io.h>
//////////////////////////////////////////////////////////////////////////
// Iterate over each point and perform the appropriate memcpys
int point_offset = 0;
- for (std::size_t cp = 0; cp < cloud_out.width * cloud_out.height; ++cp)
+ for (uindex_t cp = 0; cp < cloud_out.width * cloud_out.height; ++cp)
{
memcpy (&cloud_out.data[point_offset], &cloud2.data[cp * cloud2.point_step], cloud2.point_step);
int field_offset = cloud2.point_step;
return (true);
}
-//////////////////////////////////////////////////////////////////////////
-bool
-pcl::concatenatePointCloud (const pcl::PCLPointCloud2 &cloud1,
- const pcl::PCLPointCloud2 &cloud2,
- pcl::PCLPointCloud2 &cloud_out)
-{
- //if one input cloud has no points, but the other input does, just return the cloud with points
- if (cloud1.width*cloud1.height == 0 && cloud2.width*cloud2.height > 0)
- {
- cloud_out = cloud2;
- return (true);
- }
- if (cloud1.width*cloud1.height > 0 && cloud2.width*cloud2.height == 0)
- {
- cloud_out = cloud1;
- return (true);
- }
-
- bool strip = false;
- for (const auto &field : cloud1.fields)
- if (field.name == "_")
- strip = true;
-
- for (const auto &field : cloud2.fields)
- if (field.name == "_")
- strip = true;
-
- if (!strip && cloud1.fields.size () != cloud2.fields.size ())
- {
- PCL_ERROR ("[pcl::concatenatePointCloud] Number of fields in cloud1 (%u) != Number of fields in cloud2 (%u)\n", cloud1.fields.size (), cloud2.fields.size ());
- return (false);
- }
-
- // Copy cloud1 into cloud_out
- cloud_out = cloud1;
- std::size_t nrpts = cloud_out.data.size ();
- // Height = 1 => no more organized
- cloud_out.width = cloud1.width * cloud1.height + cloud2.width * cloud2.height;
- cloud_out.height = 1;
- if (!cloud1.is_dense || !cloud2.is_dense)
- cloud_out.is_dense = false;
- else
- cloud_out.is_dense = true;
-
- // We need to strip the extra padding fields
- if (strip)
- {
- // Get the field sizes for the second cloud
- std::vector<pcl::PCLPointField> fields2;
- std::vector<int> fields2_sizes;
- for (const auto &field : cloud2.fields)
- {
- if (field.name == "_")
- continue;
-
- fields2_sizes.push_back (field.count *
- pcl::getFieldSize (field.datatype));
- fields2.push_back (field);
- }
-
- cloud_out.data.resize (nrpts + (cloud2.width * cloud2.height) * cloud_out.point_step);
-
- // Copy the second cloud
- for (std::size_t cp = 0; cp < cloud2.width * cloud2.height; ++cp)
- {
- int i = 0;
- for (std::size_t j = 0; j < fields2.size (); ++j)
- {
- if (cloud1.fields[i].name == "_")
- {
- ++i;
- continue;
- }
-
- // We're fine with the special RGB vs RGBA use case
- if ((cloud1.fields[i].name == "rgb" && fields2[j].name == "rgba") ||
- (cloud1.fields[i].name == "rgba" && fields2[j].name == "rgb") ||
- (cloud1.fields[i].name == fields2[j].name))
- {
- memcpy (reinterpret_cast<char*> (&cloud_out.data[nrpts + cp * cloud1.point_step + cloud1.fields[i].offset]),
- reinterpret_cast<const char*> (&cloud2.data[cp * cloud2.point_step + cloud2.fields[j].offset]),
- fields2_sizes[j]);
- ++i; // increment the field size i
- }
- }
- }
- }
- else
- {
- for (std::size_t i = 0; i < cloud1.fields.size (); ++i)
- {
- // We're fine with the special RGB vs RGBA use case
- if ((cloud1.fields[i].name == "rgb" && cloud2.fields[i].name == "rgba") ||
- (cloud1.fields[i].name == "rgba" && cloud2.fields[i].name == "rgb"))
- continue;
- // Otherwise we need to make sure the names are the same
- if (cloud1.fields[i].name != cloud2.fields[i].name)
- {
- PCL_ERROR ("[pcl::concatenatePointCloud] Name of field %d in cloud1, %s, does not match name in cloud2, %s\n", i, cloud1.fields[i].name.c_str (), cloud2.fields[i].name.c_str ());
- return (false);
- }
- }
-
- cloud_out.data.resize (nrpts + cloud2.data.size ());
- memcpy (&cloud_out.data[nrpts], &cloud2.data[0], cloud2.data.size ());
- }
- return (true);
-}
-
//////////////////////////////////////////////////////////////////////////
bool
pcl::getPointCloudAsEigen (const pcl::PCLPointCloud2 &in, Eigen::MatrixXf &out)
void
pcl::copyPointCloud (
const pcl::PCLPointCloud2 &cloud_in,
- const IndicesAllocator< Eigen::aligned_allocator<int> > &indices,
+ const IndicesAllocator< Eigen::aligned_allocator<index_t> > &indices,
pcl::PCLPointCloud2 &cloud_out)
{
cloud_out.header = cloud_in.header;
int
pcl::console::parse_argument (int argc, const char * const * argv, const char * str, double &val)
{
- return parse_generic(strtod, argc, argv, str, val);
+ // added lambda wrapper for `strtod` to handle noexcept-type warning in GCC 7,
+ // refer to: https://stackoverflow.com/questions/46798456/handling-gccs-noexcept-type-warning
+ const auto strtod_l = [](const char *str, char **str_end){ return strtod(str, str_end); };
+ return parse_generic(strtod_l, argc, argv, str, val);
}
////////////////////////////////////////////////////////////////////////////////
int
pcl::console::parse_argument (int argc, const char * const * argv, const char * str, float &val)
{
- return parse_generic(strtof, argc, argv, str, val);
+ // added lambda wrapper for `strtof` to handle noexcept-type warning in GCC 7,
+ // refer to: https://stackoverflow.com/questions/46798456/handling-gccs-noexcept-type-warning
+ const auto strtof_l = [](const char *str, char **str_end){ return strtof(str, str_end); };
+ return parse_generic(strtof_l, argc, argv, str, val);
}
////////////////////////////////////////////////////////////////////////////////
return (os);
}
+ std::ostream&
+ operator << (std::ostream& os, const PointXYZLAB& p)
+ {
+ os << "(" << p.x << "," << p.y << "," << p.z << " - " << p.L << " , " << p.a << " , " << p.b << ")";
+ return (os);
+ }
+
std::ostream&
operator << (std::ostream& os, const PointXYZHSV& p)
{
operator << (std::ostream& os, const PointXYZRGBNormal& p)
{
os << "(" << p.x << "," << p.y << "," << p.z << " - "<< p.rgb << " - " << p.normal[0] << "," << p.normal[1] << "," << p.normal[2] << " - "
- << static_cast<int>(p.r) << ", "
- << static_cast<int>(p.g) << ", "
+ << static_cast<int>(p.r) << ","
+ << static_cast<int>(p.g) << ","
<< static_cast<int>(p.b) << " - "
<< p.curvature << ")";
return (os);
*/
#include <pcl/console/print.h>
#include <algorithm>
+#include <cstdarg> // for va_list, va_start, va_end
#include <cstdlib>
#include <cctype> // for toupper
#include <map>
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <iostream>
#include <cmath>
+#include <pcl/PCLPointCloud2.h> // for PCLPointCloud2
#include <pcl/common/time.h> // for MEASURE_FUNCTION_TIME
-#include <pcl/common/eigen.h>
#include <pcl/range_image/range_image.h>
namespace pcl
half_image.width = width/2;
half_image.height = height/2;
half_image.is_dense = is_dense;
- half_image.points.clear ();
- half_image.points.resize (half_image.width*half_image.height);
+ half_image.clear ();
+ half_image.resize (half_image.width*half_image.height);
int src_start_x = 2*half_image.image_offset_x_ - image_offset_x_,
src_start_y = 2*half_image.image_offset_y_ - image_offset_y_;
sub_image.width = sub_image_width;
sub_image.height = sub_image_height;
sub_image.is_dense = is_dense;
- sub_image.points.clear ();
- sub_image.points.resize (sub_image.width*sub_image.height);
+ sub_image.clear ();
+ sub_image.resize (sub_image.width*sub_image.height);
int src_start_x = combine_pixels*sub_image.image_offset_x_ - image_offset_x_,
src_start_y = combine_pixels*sub_image.image_offset_y_ - image_offset_y_;
vp_z_offset = point_cloud_data.fields[vp_z_idx].offset,
distance_offset = point_cloud_data.fields[distance_idx].offset;
- for (std::size_t point_idx = 0; point_idx < point_cloud_data.width*point_cloud_data.height; ++point_idx)
+ for (uindex_t point_idx = 0; point_idx < point_cloud_data.width*point_cloud_data.height; ++point_idx)
{
float x = *reinterpret_cast<const float*> (data+x_offset),
y = *reinterpret_cast<const float*> (data+y_offset),
PointWithViewpoint point;
point.x=distance; point.y=y; point.z=z;
point.vp_x=vp_x; point.vp_y=vp_y; point.vp_z=vp_z;
- far_ranges.points.push_back (point);
+ far_ranges.push_back (point);
}
}
far_ranges.width= far_ranges.size (); far_ranges.height = 1;
if(CMAKE_COMPILER_IS_GNUCXX)
string(REPLACE "-Wold-style-cast" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "-Wno-invalid-offsetof" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-conversion -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-but-set-variable")
+ string(APPEND CMAKE_CXX_FLAGS " -Wno-conversion -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-but-set-variable")
+ # allow deprecation warnings in Eigen(3.3.7)/Core, see here: https://gitlab.kitware.com/vtk/vtk/-/issues/17661
+ string(APPEND CMAKE_CXX_FLAGS " -Wno-error=cpp")
endif()
collect_subproject_directory_names("${CMAKE_CURRENT_SOURCE_DIR}" "CMakeLists.txt" PCL_CUDA_MODULES_NAMES PCL_CUDA_MODULES_DIRS)
{
pcl::PointCloud<pcl::PointXYZRGB>::Ptr output (new pcl::PointCloud<pcl::PointXYZRGB>);
PointCloudAOS<Host> data_host;
- data_host.points.resize (cloud->points.size());
+ data_host.resize (cloud->points.size());
for (std::size_t i = 0; i < cloud->points.size (); ++i)
{
PointXYZRGB pt;
pcl::ScopeTime ttt ("all");
pcl::PointCloud<pcl::PointXYZRGB>::Ptr output (new pcl::PointCloud<pcl::PointXYZRGB>);
PointCloudAOS<Host> data_host;
- data_host.points.resize (cloud->points.size());
+ data_host.resize (cloud->points.size());
for (std::size_t i = 0; i < cloud->points.size (); ++i)
{
PointXYZRGB pt;
{
pcl::PointCloud<pcl::PointXYZRGB>::Ptr output (new pcl::PointCloud<pcl::PointXYZRGB>);
PointCloudAOS<Host> data_host;
- data_host.points.resize (cloud->points.size());
+ data_host.resize (cloud->points.size());
for (std::size_t i = 0; i < cloud->points.size (); ++i)
{
PointXYZRGB pt;
{
pcl::PointCloud<pcl::PointXYZRGB>::Ptr output (new pcl::PointCloud<pcl::PointXYZRGB>);
PointCloudAOS<Host> data_host;
- data_host.points.resize (cloud->points.size());
+ data_host.resize (cloud->points.size());
for (std::size_t i = 0; i < cloud->points.size (); ++i)
{
PointXYZRGB pt;
{
points = rhs.points;
// TODO: Test speed on operator () = vs resize+copy
- //points.resize (rhs.points.size ());
- //thrust::copy (rhs.points.begin (), rhs.points.end (), points.begin ());
+ //points.resize (rhs.size ());
+ //thrust::copy (rhs.begin (), rhs.end (), points.begin ());
width = rhs.width;
height = rhs.height;
is_dense = rhs.is_dense;
applyFilter (PointCloud &output)
{
// Allocate enough space
- output.points.resize (input_->points.size ());
+ output.resize (input_->points.size ());
// Copy data
- Device<PointXYZRGB>::type::iterator nr_points = thrust::copy_if (input_->points.begin (), input_->points.end (), output.points.begin (), isFiniteAOS ());
- //Device<float3>::type::iterator nr_points = thrust::copy_if (input_->points.begin (), input_->points.end (), output.points.begin (), isFiniteAOS ());
- output.points.resize (nr_points - output.points.begin ());
+ Device<PointXYZRGB>::type::iterator nr_points = thrust::copy_if (input_->points.begin (), input_->points.end (), output.begin (), isFiniteAOS ());
+ //Device<float3>::type::iterator nr_points = thrust::copy_if (input_->points.begin (), input_->points.end (), output.begin (), isFiniteAOS ());
+ output.resize (nr_points - output.begin ());
//std::cerr << "[applyFilterAOS]: ";
- //std::cerr << input_->points.size () << " " << output.points.size () << std::endl;
+ //std::cerr << input_->points.size () << " " << output.size () << std::endl;
}
};
applyFilter (PointCloud &output)
{
// Allocate enough space
- output.points.resize (input_->points.size ());
+ output.resize (input_->points.size ());
// Copy data
- Device<PointXYZRGB>::type::iterator nr_points = thrust::copy_if (input_->points.begin (), input_->points.end (), output.points.begin (), isFiniteAOS ());
- output.points.resize (nr_points - output.points.begin ());
+ Device<PointXYZRGB>::type::iterator nr_points = thrust::copy_if (input_->points.begin (), input_->points.end (), output.begin (), isFiniteAOS ());
+ output.resize (nr_points - output.begin ());
//std::cerr << "[applyFilterAOS]: ";
- //std::cerr << input_->points.size () << " " << output.points.size () << std::endl;
+ //std::cerr << input_->points.size () << " " << output.size () << std::endl;
}
};
set(SUBSYS_PATH cuda/io)
set(SUBSYS_DESC "Point cloud CUDA IO library")
set(SUBSYS_DEPS cuda_common io common)
+set(SUBSYS_EXT_DEPS openni)
# ---[ Point Cloud Library - pcl/cuda/io
set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
mark_as_advanced("BUILD_${SUBSYS_NAME}")
-PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS})
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} EXT_DEPS ${SUBSYS_EXT_DEPS})
PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
-if(NOT (build AND HAVE_OPENNI))
+if(NOT build)
return()
endif()
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
-set(EXT_DEPS "")
-#set(EXT_DEPS CUDA)
-PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} EXT_DEPS ${EXT_DEPS})
+PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} EXT_DEPS ${SUBSYS_EXT_DEPS})
# Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}" ${incs})
void
fromPCL (const pcl::PointCloud<pcl::PointXYZRGB> &input, PointCloudAOS<Host> &output)
{
-// output.points.resize (input.points.size());
-// for (std::size_t i = 0; i < input.points.size (); ++i)
+// output.resize (input.size());
+// for (std::size_t i = 0; i < input.size (); ++i)
// {
// output[i].x = input[i].x;
// output[i].y = input[i].y;
// // Pack RGB into a float
// output[i].rgb = *(float*)(&input[i].rgb);
// }
-// thrust::copy (output.points.begin(), output.points.end (), input.points.begin());
+// thrust::copy (output.begin(), output.end (), input.begin());
// output.width = input.width;
// output.height = input.height;
// output.is_dense = input.is_dense;
const thrust::host_vector<float4> &normals,
pcl::PointCloud<pcl::PointXYZRGBNormal> &output)
{
- output.points.resize (input.points.size ());
+ output.resize (input.points.size ());
for (std::size_t i = 0; i < input.points.size (); ++i)
{
output[i].x = input.points[i].x;
toPCL (const PointCloudAOS<Host> &input,
pcl::PointCloud<pcl::PointXYZRGB> &output)
{
- output.points.resize (input.points.size ());
+ output.resize (input.points.size ());
for (std::size_t i = 0; i < input.points.size (); ++i)
{
output[i].x = input.points[i].x;
output.height = input.height;
output.is_dense = input.is_dense;
-/* for (std::size_t i = 0; i < output.points.size (); ++i)
+/* for (std::size_t i = 0; i < output.size (); ++i)
std::cerr <<
output[i].x << " " <<
output[i].y << " " <<
OpenNIRGB DebayerBilinear<Storage>::operator () (int index) const
{
// get position
- int xIdx = index % width;
- int yIdx = index / width;
+ unsigned int xIdx = index % width;
+ unsigned int yIdx = index / width;
OpenNIRGB result;
{
if (input_->height == 1)
{
- PCL_ERROR ("[pcl::%s::radiusSearch] Input dataset is not organized!", getName ().c_str ());
+ PCL_ERROR ("[pcl::%s::radiusSearch] Input dataset is not organized!\n", getName ().c_str ());
return 0;
}
if (input_->height == 1)
{
- PCL_ERROR ("[pcl::%s::nearestKSearch] Input dataset is not organized!", getName ().c_str ());
+ PCL_ERROR ("[pcl::%s::nearestKSearch] Input dataset is not organized!\n", getName ().c_str ());
return 0;
}
template <template <typename> class Storage>
__inline__ __host__
- void create_scatter_stencil (int new_w, int new_h, int skip, int width, int height, typename Storage<int>::type &stencil)
+ void create_scatter_stencil (unsigned int new_w, unsigned int new_h, int skip, int width, int height, typename Storage<int>::type &stencil)
{
for (unsigned int i = 0; i < new_w * new_h; ++i)
{
To enable colorgcc, perform the following steps:
-.. code-block:: cmake
+.. code-block:: shell
cp /etc/colorgcc/colorgccrc $HOME/.colorgccrc
* edit the $HOME/.colorgccrc file, search for the following lines:
-.. code-block:: cmake
+.. code-block:: text
g++: /usr/bin/g++
gcc: /usr/bin/gcc
and replace them with:
-.. code-block:: cmake
+.. code-block:: text
g++: ccache /usr/bin/g++
gcc: ccache /usr/bin/gcc
* create a $HOME/bin or $HOME/sbin directory, and create the following softlinks in it
-.. code-block:: cmake
+.. code-block:: shell
ln -s /usr/bin/colorgcc c++
ln -s /usr/bin/colorgcc cc
An in-depth discussion about the PCL 2.x API can be found here.
Committing changes to the git master
------------------------------------
+------------------------------------
In order to make the commit messages easier to oversee and to keep the changelist homogeneous, please use the following format:
"* <fixed|bugfix|changed|new> X in @<classname>@ (#<bug number>)"
.. code-block:: cpp
#include <pcl/module_name/file_name.h>
- #incluce <pcl/module_name/impl/file_name.hpp>
+ #include <pcl/module_name/impl/file_name.hpp>
1.4. Defines & Macros
^^^^^^^^^^^^^^^^^^^^^
booktitle = {{IEEE International Conference on Robotics and Automation (ICRA)}},
month = {May 9-13},
year = {2011},
- address = {Shanghai, China}
+ address = {Shanghai, China},
+ publisher = {IEEE}
}
</pre>
<a href="http://www.pointclouds.org/assets/pdf/pcl_icra2011.pdf">Download PDF here</a>.
main (int argc, char** argv)
{
pcl::PointCloud<MyPointType> cloud;
- cloud.points.resize (2);
+ cloud.resize (2);
cloud.width = 2;
cloud.height = 1;
.. raw:: html
- <iframe title="Trimmed B-spline surface fitting" width="480" height="390" src="http://www.youtube.com/embed/trH2kWELvyw?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Trimmed B-spline surface fitting" width="480" height="390" src="https://www.youtube.com/embed/trH2kWELvyw?rel=0" frameborder="0" allowfullscreen></iframe>
Theoretical background
.. literalinclude:: sources/bspline_fitting/bspline_fitting.cpp
:language: cpp
:linenos:
- :lines: 1-220
+ :lines: 1-219
The explanation
For systems for which we do not offer precompiled binaries, you need to compile Point Cloud Library (PCL) from source. Here are the steps that you need to take:
Go to `Github <https://github.com/PointCloudLibrary/pcl/releases>`_ and download the version number of your choice.
-Uncompress the tar-bzip archive, e.g. (replace 1.7.2 with the correct version number)::
+Uncompress the tar-gzip archive, e.g. (replace 1.7.2 with the correct version number)::
- tar xvfj pcl-pcl-1.7.2.tar.gz
+ tar xvf pcl-pcl-1.7.2.tar.gz
Change the directory to the pcl-pcl-1.7.2 (replace 1.7.2 with the correct version number) directory, and create a build directory in there::
The following code libraries are **required** for the compilation and usage of the PCL libraries shown below:
.. note::
-pcl_* denotes all PCL libraries, meaning that the particular dependency is a strict requirement for the usage of anything in PCL.
+ pcl_* denotes all PCL libraries, meaning that the particular dependency is a strict requirement for the usage of anything in PCL.
+---------------------------------------------------------------+-----------------+-------------------------+-------------------+
| Logo | Library | Minimum version | Mandatory |
- ``pcl::console::TicToc`` is used for easy output of timing results.
- :ref:`voxelgrid` is being used (lines 66-73) to downsample the cloud and give a more equalized point density.
- :ref:`normal_estimation` is being used (lines 75-83) to estimate normals which will be appended to the point information;
- The Conditional Euclidean Clustering class will be templated with ``pcl::PoitnXYZINormal``, containing x, y, z, intensity, normal and curvature information to use in the condition function.
+ The Conditional Euclidean Clustering class will be templated with ``pcl::PointXYZINormal``, containing x, y, z, intensity, normal and curvature information to use in the condition function.
Lines 85-95 set up the Conditional Euclidean Clustering class for use:
- Clusters that make up less than 0.1% of the cloud's total points are considered too small.
- Clusters that make up more than 20% of the cloud's total points are considered too large.
- The resulting clusters are stored in the ``pcl::IndicesClusters`` format, which is an array of indices-arrays, indexing points of the input point cloud.
- - Too small clusters or too large clusters are not passed to the main output but can instead be retrieved in separate ``pcl::IndicesClusters`` data containers, but only is the class was initialized with TRUE.
+ - Too small clusters or too large clusters are not passed to the main output but can instead be retrieved in separate ``pcl::IndicesClusters`` data containers, but only if the class was initialized with TRUE.
Lines 12-49 show some examples of condition functions:
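
For orientation only, here is an illustrative sketch of such a condition function (it is not the tutorial's source listing; the threshold is arbitrary). The function receives the two candidate points and their squared distance, and returns whether they may belong to the same cluster:

.. code-block:: cpp

   #include <pcl/point_types.h>  // pcl::PointXYZINormal
   #include <cmath>              // std::abs

   // Grow a cluster across two points only if their intensities are similar.
   bool
   enforceIntensitySimilarity (const pcl::PointXYZINormal& point_a,
                               const pcl::PointXYZINormal& point_b,
                               float /*squared_distance*/)
   {
     return std::abs (point_a.intensity - point_b.intensity) < 5.0f;
   }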
.. raw:: html
- <iframe title="Acquiring the convex hull of a planar surface" width="480" height="390" src="http://www.youtube.com/embed/J9CjWDgPDTM?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Acquiring the convex hull of a planar surface" width="480" height="390" src="https://www.youtube.com/embed/J9CjWDgPDTM?rel=0" frameborder="0" allowfullscreen></iframe>
The code
--------
.. raw:: html
- <iframe title="Cylinder model segmentation" width="480" height="390" src="http://www.youtube.com/embed/SjbEDEGAeTk?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Cylinder model segmentation" width="480" height="390" src="https://www.youtube.com/embed/SjbEDEGAeTk?rel=0" frameborder="0" allowfullscreen></iframe>
.. note::
The cylindrical model is not perfect due to the presence of noise in the data.
If you are using an Ensenso X device you have to calibrate the device before trying to run the PCL driver. If you don't you will get an error like this:
.. code-block:: cpp
+
Initialising nxLib
Opening Ensenso stereo camera id = 0
openDevice: NxLib error ExecutionFailed (17) occurred while accessing item /Execute.
.. raw:: html
- <iframe title="Extracting indices from a PointCloud" width="480" height="390" src="http://www.youtube.com/embed/ZTK7NR1Xx4c?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Extracting indices from a PointCloud" width="480" height="390" src="https://www.youtube.com/embed/ZTK7NR1Xx4c?rel=0" frameborder="0" allowfullscreen></iframe>
The code
--------
--- /dev/null
+.. _function_filter:
+
+Removing outliers using a custom non-destructive condition
+----------------------------------------------------------
+
+This document demonstrates how to use the FunctionFilter class to remove points from a PointCloud that do not satisfy a custom criterion. This is a cleaner
+and faster approach compared to the ConditionalRemoval filter or a `custom Condition class <https://cpp-optimizations.netlify.app/pcl_filter/>`_.
+
+.. note::
+ Advanced users can use the FunctorFilter class that can provide a small but measurable speedup when used with a `lambda <https://en.cppreference.com/w/cpp/language/lambda>`_.
+
+The code
+--------
+
+First, create a file, let's say, sphere_removal.cpp, in your favorite editor, and place the following inside it:
+
+.. literalinclude:: sources/function_filter/sphere_removal.cpp
+ :language: cpp
+ :linenos:
+
+The explanation
+---------------
+
+Now, let's break down the code piece by piece.
+
+In the following lines, we define the PointCloud structures, fill in the input cloud, and display its content to screen.
+
+.. literalinclude:: sources/function_filter/sphere_removal.cpp
+ :language: cpp
+ :lines: 10-21
+
+Then, we create the condition that a given point must satisfy in order to remain in our PointCloud. To do this we create a `std::function` which accepts a PointCloud by const reference and an index, and returns true only if the point lies inside a sphere. This is then used to build the filter.
+
+.. literalinclude:: sources/function_filter/sphere_removal.cpp
+ :language: cpp
+ :lines: 23-34
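+
+For orientation, here is a condensed, self-contained sketch of the pattern described above. The callback signature follows the text (a PointCloud by const reference plus an index, returning true for points to keep); the header path, the class name (``pcl::experimental::FunctionFilter``) and the sphere centre/radius are assumptions made for illustration, so check them against the headers shipped with your PCL version:
+
+.. code-block:: cpp
+
+   #include <pcl/point_cloud.h>
+   #include <pcl/point_types.h>
+   #include <pcl/filters/experimental/functor_filter.h>  // assumed location of FunctionFilter
+
+   #include <functional>
+
+   using Cloud = pcl::PointCloud<pcl::PointXYZ>;
+
+   void
+   keepPointsInsideSphere (const Cloud::ConstPtr& cloud_in, Cloud& cloud_out)
+   {
+     // Keep a point only if it lies inside a sphere of radius 2 around the origin.
+     std::function<bool (const Cloud&, pcl::index_t)> condition =
+         [] (const Cloud& cloud, pcl::index_t idx) {
+           const auto& p = cloud[idx];
+           return (p.x * p.x + p.y * p.y + p.z * p.z) < 4.0f;
+         };
+
+     pcl::experimental::FunctionFilter<pcl::PointXYZ> filter (condition);
+     filter.setInputCloud (cloud_in);
+     filter.filter (cloud_out);
+   }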
+
+This last bit of code just applies the filter to our original PointCloud, and removes all of the points that do not satisfy the conditions we specified. Then it outputs all of the points remaining in the PointCloud.
+
+.. literalinclude:: sources/function_filter/sphere_removal.cpp
+ :language: cpp
+ :lines: 36-42
+
+Compiling and running the program
+---------------------------------
+
+Add the following lines to your CMakeLists.txt file:
+
+.. literalinclude:: sources/function_filter/CMakeLists.txt
+ :language: cmake
+ :linenos:
+
+After you have compiled the executable, you can run it. Simply do::
+
+ $ ./sphere_removal
+
+You will see something similar to::
+
+ Cloud before filtering:
+ -1.23392 1.81505 -0.968005
+ -0.00934529 1.36497 0.158734
+ 0.488435 1.96851 -0.0534078
+ 1.27135 1.16404 -1.00462
+ -0.249089 -0.0815883 1.13229
+ 0.448447 1.48914 1.78378
+ 1.14143 1.77363 1.68965
+ 1.08544 -1.01664 -1.13041
+ 1.1199 0.9951 -1.13308
+ 1.44268 -1.44434 -0.391739
+ Cloud after filtering:
+ -1.23392 1.81505 -0.968005
+ -0.00934529 1.36497 0.158734
+ 0.488435 1.96851 -0.0534078
+ 1.27135 1.16404 -1.00462
+ 1.14143 1.77363 1.68965
+ 1.08544 -1.01664 -1.13041
+ 1.1199 0.9951 -1.13308
+ 1.44268 -1.44434 -0.391739
.. literalinclude:: sources/global_hypothesis_verification/global_hypothesis_verification.cpp
:language: c++
- :lines: 389-399
+ :lines: 387-397
then, we run ICP on the ``instances`` wrt. the ``scene`` to obtain the ``registered_instances``:
.. literalinclude:: sources/global_hypothesis_verification/global_hypothesis_verification.cpp
:language: c++
- :lines: 401-431
+ :lines: 399-429
Hypotheses Verification
***********************
.. literalinclude:: sources/global_hypothesis_verification/global_hypothesis_verification.cpp
:language: c++
- :lines: 433-466
+ :lines: 431-465
``GlobalHypothesesVerification`` takes as input a list of ``registered_instances`` and a ``scene`` so we can ``verify()`` them
to get a ``hypotheses_mask``: this is a `bool` array where ``hypotheses_mask[i]`` is ``TRUE`` if ``registered_instances[i]`` is a
.. literalinclude:: sources/global_hypothesis_verification/global_hypothesis_verification.cpp
:language: c++
- :lines: 468-525
+ :lines: 467-524
Compiling and running the program
Windows is currently **not** officially supported for the GPU methods.
Checking CUDA Version
----------------
+---------------------
In order to run the code you will need a system with an Nvidia GPU and CUDA Toolkit v9.2+ installed.
We will not be covering CUDA toolkit installation in this tutorial, as there already exist many great tutorials covering it.
Checking C++ Version
----------------
+--------------------
The GPU methods in PCL require GCC 7 or newer, or Clang 6 or newer (the exact minimum Clang version has not been verified).
This will not be a problem if you are running Ubuntu 18.04 or later. However, on Ubuntu 16.04 you will need to install GCC 7 or Clang 6 manually (lower versions have not been tested), because the versions available by default are GCC 5 and Clang 3.8.
$ sudo update-alternatives --config gcc
Installing Eigen
----------------
+----------------
You will also need Eigen v3.3.7+, since the previous versions are incompatible with the latest CUDA versions.
-If you are on Ubuntu 29+, then there is no issue since Eigen 3.3.7 is shipped by default.
+If you are on Ubuntu 20 or newer, then there is no issue since Eigen 3.3.7 is shipped by default.
On older versions Eigen v3.3.7 will need to be installed manually::
$ wget -qO- https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.tar.gz | tar xz
.. raw:: html
- <iframe width="560" height="315" src="http://www.youtube.com/embed/Wd4OM8wOO1E?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe width="560" height="315" src="https://www.youtube.com/embed/Wd4OM8wOO1E?rel=0" frameborder="0" allowfullscreen></iframe>
This shows how to detect people with a Primesense device; the full version
working on oni and pcd files can be found in the git master.
.. literalinclude:: sources/gpu/people_detect/src/people_detect.cpp
:language: cpp
- :lines: 317-367
+ :lines: 319-369
First the GPU device is set. By default this is the first GPU found on the bus, but if you have multiple GPUs in
your system, this allows you to select a specific one.
.. literalinclude:: sources/gpu/people_detect/src/people_detect.cpp
:language: cpp
- :lines: 329-338
+ :lines: 331-340
The implementation is based on an approach similar to that of Shotton et al. and thus needs off-line learned random
decision forests for labeling. The current implementation allows up to 4 decision trees to be loaded into
.. literalinclude:: sources/gpu/people_detect/src/people_detect.cpp
:language: cpp
- :lines: 340-341
+ :lines: 342-343
An additional parameter allows you to configure the number of trees to be loaded.
.. literalinclude:: sources/gpu/people_detect/src/people_detect.cpp
:language: cpp
- :lines: 350-352
+ :lines: 352-354
Then the RDF object is created, loading the trees upon creation.
.. literalinclude:: sources/gpu/people_detect/src/people_detect.cpp
:language: cpp
- :lines: 354-359
+ :lines: 356-361
Now we create the application object, give it the pointer to the RDF object and start the loop.
Now we'll have a look at the main loop.
.. literalinclude:: sources/gpu/people_detect/src/people_detect.cpp
:language: cpp
- :lines: 238-286
+ :lines: 237-288
This routine first connects a callback routine to the grabber and waits for valid data to arrive.
Each time the data arrives it will call the process function of the people detector, this is a
.. literalinclude:: sources/gpu/people_detect/src/people_detect.cpp
:language: cpp
- :lines: 141-178
+ :lines: 140-177
-Line 144 calls the RDF getLabels method which returns the labels on the device, these however
+Line 143 calls the RDF getLabels method which returns the labels on the device, these however
are a discrete enum of the labels and are visually hard to recognize, so these are converted to
-colors that illustrate each body part in line 145.
+colors that illustrate each body part in line 144.
At this point the results are still stored in the device memory and need to be copied to the CPU
-host memory, this is done in line 151. Afterwards the images are shown and stored to disk.
+host memory, this is done in line 150. Afterwards the images are shown and stored to disk.
Compiling and running the program
---------------------------------
.. raw:: html
- <iframe title="Surface Triangulation and Point Cloud Classification" width="480" height="390" src="http://www.youtube.com/embed/VALTnZCyWc0?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Surface Triangulation and Point Cloud Classification" width="480" height="390" src="https://www.youtube.com/embed/VALTnZCyWc0?rel=0" frameborder="0" allowfullscreen></iframe>
Background: algorithm and parameters
------------------------------------
+=============+================================================+
| Foo | a class named `Foo` |
+-------------+------------------------------------------------+
-| FooPtr | a shared pointer to a class `Foo`, |
+| FooPtr | a shared pointer to a class `Foo`, |
| | |
-| | e.g., `shared_ptr<Foo>` |
+| | e.g., `shared_ptr<Foo>` |
+-------------+------------------------------------------------+
-| FooConstPtr | a const shared pointer to a class `Foo`, |
+| FooConstPtr | a const shared pointer to a class `Foo`, |
| | |
-| | e.g., `const shared_ptr<const Foo>` |
+| | e.g., `const shared_ptr<const Foo>` |
+-------------+------------------------------------------------+
How to pass the input
Introduction
------------
+------------
The following links describe a set of basic PCL tutorials. Please note that
their source codes may already be provided as part of the PCL regular releases,
-----------------------------------------
In this tutorial we will learn how to transform a point cloud using a 4x4 matrix.
-We will apply a rotation and a translation to a loaded point cloud and display then
+We will apply a rotation and a translation to a loaded point cloud and display the
result.
This program is able to load one PCD or PLY file; apply a matrix transformation on it
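As a rough, self-contained sketch of the operation described above (not the tutorial's program; the rotation angle, translation and toy point below are arbitrary), a transform can be built with Eigen and applied with ``pcl::transformPointCloud``:

.. code-block:: cpp

   #include <pcl/point_cloud.h>
   #include <pcl/point_types.h>
   #include <pcl/common/transforms.h> // for pcl::transformPointCloud
   #include <Eigen/Geometry>
   #include <cmath>

   int
   main ()
   {
     pcl::PointCloud<pcl::PointXYZ>::Ptr source (new pcl::PointCloud<pcl::PointXYZ>);
     source->push_back (pcl::PointXYZ (1.0f, 0.0f, 0.0f)); // toy data

     // Rotate by 45 degrees around the Z axis and translate 2.5 m along X
     Eigen::Affine3f transform = Eigen::Affine3f::Identity ();
     transform.translation () << 2.5f, 0.0f, 0.0f;
     transform.rotate (Eigen::AngleAxisf (static_cast<float> (M_PI) / 4.0f, Eigen::Vector3f::UnitZ ()));

     pcl::PointCloud<pcl::PointXYZ> transformed;
     pcl::transformPointCloud (*source, transformed, transform);
     return 0;
   }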
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 18-23
+ :lines: 18-19
-This few lines are not necessary. Their only purpose is to show that ``pcl::MinCutSegmentation`` class can work with indices.
+The purpose of these lines is to show that the ``pcl::MinCutSegmentation`` class can work with indices. Here, only the valid points are chosen for segmentation.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 25-25
+ :lines: 21-21
Here is the line where the instantiation of the ``pcl::MinCutSegmentation`` class takes place.
It is a template class that has only one parameter - PointT - which specifies the type of points that will be used.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 26-27
+ :lines: 22-23
These lines provide the algorithm with the cloud that must be segmented and the indices.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 29-35
+ :lines: 25-31
As mentioned before, the algorithm requires a point that is known to be the object's center. These lines provide it.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 37-38
+ :lines: 33-34
These lines set :math:`\sigma` and the object's radius required for the smooth cost calculation.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 39-39
+ :lines: 35-35
This line tells how many neighbours to find when constructing the graph. The more neighbours are set, the more edges the graph will contain.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 40-40
+ :lines: 36-36
Here is the line where the foreground penalty is set.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 42-43
+ :lines: 38-39
These lines are responsible for launching the algorithm. After the segmentation, clusters will contain the result.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 45-45
+ :lines: 41-41
You can easily access the flow value that was computed during the graph cut. This is exactly what is happening here.
.. literalinclude:: sources/min_cut_segmentation/min_cut_segmentation.cpp
:language: cpp
- :lines: 47-52
+ :lines: 43-48
These lines simply create an instance of the ``CloudViewer`` class for result visualization.
{
const std::size_t nr_points = cloud->size ();
- cloud_buffers.points.resize (nr_points*3);
+ cloud_buffers.resize (nr_points*3);
cloud_buffers.rgb.resize (nr_points*3);
const pcl::PointXYZ bounds_min (-0.9, -0.8, 1.0);
j++;
}
- cloud_buffers.points.resize (j * 3);
+ cloud_buffers.resize (j * 3);
cloud_buffers.rgb.resize (j * 3);
}
.. raw:: html
- <iframe width="425" height="349" src="http://www.youtube.com/embed/x1FSssJrfik" frameborder="0" allowfullscreen></iframe>
+ <iframe width="425" height="349" src="https://www.youtube.com/embed/x1FSssJrfik" frameborder="0" allowfullscreen></iframe>
Theoretical primer
------------------
Now, let's explain the code in detail.
-We fist define and instantiate a shared PointCloud structure and fill it with random points.
+We first define and instantiate a shared PointCloud structure and fill it with random points.
.. literalinclude:: sources/octree_search/octree_search.cpp
:language: cpp
:language: cpp
:lines: 27-32
-Once the PointCloud is associated with an octree, we can perform search operations. The fist search method used here is "Neighbors within Voxel Search". It assigns the search point to the corresponding
+Once the PointCloud is associated with an octree, we can perform search operations. The first search method used here is "Neighbors within Voxel Search". It assigns the search point to the corresponding
leaf node voxel and returns a vector of point indices. These indices relate to points which fall within the same voxel. The distance between
the search point and the search result therefore depends on the resolution parameter of the octree.
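A minimal sketch of that first search method (the resolution, toy points and search point below are placeholders, not the tutorial's values):

.. code-block:: cpp

   #include <pcl/point_cloud.h>
   #include <pcl/point_types.h>
   #include <pcl/octree/octree_search.h>

   #include <iostream>
   #include <vector>

   int
   main ()
   {
     pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
     cloud->push_back (pcl::PointXYZ (1.1f, 2.2f, 3.3f)); // toy points near the search point
     cloud->push_back (pcl::PointXYZ (1.2f, 2.1f, 3.4f));
     cloud->push_back (pcl::PointXYZ (9.0f, 9.0f, 9.0f)); // a far-away point

     const float resolution = 1.0f; // side length of the leaf voxels
     pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree (resolution);
     octree.setInputCloud (cloud);
     octree.addPointsFromInputCloud ();

     pcl::PointXYZ search_point (1.0f, 2.0f, 3.0f);
     std::vector<int> point_idx;
     if (octree.voxelSearch (search_point, point_idx)) // indices of points in the same voxel
       for (const auto idx : point_idx)
         std::cout << (*cloud)[idx].x << " " << (*cloud)[idx].y << " " << (*cloud)[idx].z << std::endl;
     return 0;
   }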
Now, let's discuss the code in detail.
-We fist instantiate the OctreePointCloudChangeDetector class and define its voxel resolution.
+We first instantiate the OctreePointCloudChangeDetector class and define its voxel resolution.
.. literalinclude:: sources/octree_change_detection/octree_change_detection.cpp
:language: cpp
.. raw:: html
- <iframe title="PCL OpenNI Viewer example" width="480" height="390" src="http://www.youtube.com/embed/x3SaWQkPsPI?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="PCL OpenNI Viewer example" width="480" height="390" src="https://www.youtube.com/embed/x3SaWQkPsPI?rel=0" frameborder="0" allowfullscreen></iframe>
So let's look at the code. From *visualization/tools/openni_viewer_simple.cpp*
* **DATA** - specifies the data type that the point cloud data is stored in. As
- of version 0.7, two data types are supported: *ascii* and *binary*. See the
+ of version 0.7, three data types are supported: *ascii*, *binary*, and *binary_compressed*. See the
next section for more details.
Data storage types
------------------
-As of version 0.7, the **.PCD** file format uses two different modes for storing data:
+As of version 0.7, the **.PCD** file format uses three different modes for storing data:
* in **ASCII** form, with each point on a new line::
`pcl::PointCloud.points` array/vector. On Linux systems, we use `mmap`/`munmap`
operations for the fastest possible read/write access to the data.
+* in **binary_compressed** form. The body (everything after the header) starts with a 32 bit unsigned binary number which specifies the size in bytes of the data in *compressed* form. Next is another 32 bit unsigned binary number which specifies the size in bytes of the data in *uncompressed* form. Then follows the compressed data. The compression and decompression is done using Marc Lehmann's LZF algorithm. It is mediocre in terms of size reduction, but very fast. For typical point clouds, the compressed data has 30 to 60 percent of the original size. Before compressing, the data is reordered to improve compression, from the standard array-of-structures layout to a structure-of-arrays layout. So for example a cloud with three points and fields x, y, z would be reordered from xyzxyzxyz to xxxyyyzzz.
+
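The snippet below is an illustrative sketch only, not PCL's actual reader: it simply walks the layout described above (two 32-bit unsigned sizes followed by the LZF-compressed block), with a placeholder file name and a naive header skip:

.. code-block:: cpp

   #include <cstdint>
   #include <cstdio>
   #include <cstring>
   #include <vector>

   int
   main ()
   {
     std::FILE* f = std::fopen ("cloud.pcd", "rb"); // placeholder file name
     if (!f)
       return 1;

     // Skip the ASCII header; the body starts right after the "DATA ..." line
     char line[1024];
     while (std::fgets (line, sizeof (line), f))
       if (std::strncmp (line, "DATA", 4) == 0)
         break;

     std::uint32_t compressed_size = 0, uncompressed_size = 0;
     std::fread (&compressed_size, sizeof (compressed_size), 1, f);
     std::fread (&uncompressed_size, sizeof (uncompressed_size), 1, f);

     std::vector<char> compressed (compressed_size);
     std::fread (compressed.data (), 1, compressed_size, f);
     // The buffer would now be decompressed with LZF into uncompressed_size bytes,
     // and the structure-of-arrays data (xxx...yyy...zzz...) interleaved back into
     // the usual per-point layout.
     std::fclose (f);
     return 0;
   }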
Storing point cloud data in both a simple ascii form with each point on a line,
space or tab separated, without any other characters on it, as well as in a
.. raw:: html
- <iframe width="420" height="315" src="http://www.youtube.com/embed/0kPwTds7HSk" frameborder="0" allowfullscreen></iframe>
+ <iframe width="420" height="315" src="https://www.youtube.com/embed/0kPwTds7HSk" frameborder="0" allowfullscreen></iframe>
.. raw:: html
- <iframe width="480" height="270" src="http://www.youtube.com/embed/2Xgd67nkwzs" frameborder="0" allowfullscreen></iframe>
+ <iframe width="480" height="270" src="https://www.youtube.com/embed/2Xgd67nkwzs" frameborder="0" allowfullscreen></iframe>
.. raw:: html
- <iframe title="Planar model segmentation" width="480" height="390" src="http://www.youtube.com/embed/ZTK7NR1Xx4c?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Planar model segmentation" width="480" height="390" src="https://www.youtube.com/embed/ZTK7NR1Xx4c?rel=0" frameborder="0" allowfullscreen></iframe>
The code
--------
.. important::
- Please visit http://docs.pointclouds.org/trunk/a02954.html
+ Please visit https://pointclouds.org/documentation/group__sample__consensus.html
for more information on various other implemented Sample Consensus models and
robust estimators.
The abbreviation of "RANdom SAmple Consensus" is RANSAC, and it is an iterative method that is used to estimate parameters of a mathematical model from a set of data containing outliers. This algorithm was published by Fischler and Bolles in 1981. The RANSAC algorithm assumes that all of the data we are looking at consists of both inliers and outliers. Inliers can be explained by a model with a particular set of parameter values, while outliers do not fit that model in any circumstance. Another necessary assumption is that a procedure which can optimally estimate the parameters of the chosen model from the data is available.
-From [Wikipedia]_:
+From [WikipediaRANSAC]_:
*The input to the RANSAC algorithm is a set of observed data values, a parameterized model which can explain or be fitted to the observations, and some confidence parameters.*
:align: right
:height: 200px
-The pictures to the left and right (From [Wikipedia]_) show a simple application of the RANSAC algorithm on a 2-dimensional set of data. The image on our left is a visual representation of a data set containing both inliers and outliers. The image on our right shows all of the outliers in red, and shows inliers in blue. The blue line is the result of the work done by RANSAC. In this case the model that we are trying to fit to the data is a line, and it looks like it's a fairly good fit to our data.
+The pictures to the left and right (From [WikipediaRANSAC]_) show a simple application of the RANSAC algorithm on a 2-dimensional set of data. The image on our left is a visual representation of a data set containing both inliers and outliers. The image on our right shows all of the outliers in red, and shows inliers in blue. The blue line is the result of the work done by RANSAC. In this case the model that we are trying to fit to the data is a line, and it looks like it's a fairly good fit to our data.
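A minimal, self-contained sketch of this idea using PCL's sample consensus classes (fitting a plane rather than the 2D line in the figures; the toy data below is made up):

.. code-block:: cpp

   #include <pcl/point_cloud.h>
   #include <pcl/point_types.h>
   #include <pcl/sample_consensus/ransac.h>
   #include <pcl/sample_consensus/sac_model_plane.h>
   #include <pcl/common/io.h> // for pcl::copyPointCloud

   #include <vector>

   int
   main ()
   {
     pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
     // Toy data: a grid of inliers on the z = 0 plane plus one outlier
     for (float x = 0.0f; x < 1.0f; x += 0.25f)
       for (float y = 0.0f; y < 1.0f; y += 0.25f)
         cloud->push_back (pcl::PointXYZ (x, y, 0.0f));
     cloud->push_back (pcl::PointXYZ (0.5f, 0.5f, 5.0f)); // outlier

     pcl::SampleConsensusModelPlane<pcl::PointXYZ>::Ptr
       model (new pcl::SampleConsensusModelPlane<pcl::PointXYZ> (cloud));
     pcl::RandomSampleConsensus<pcl::PointXYZ> ransac (model);
     ransac.setDistanceThreshold (0.01); // points closer than 1 cm to the plane count as inliers
     ransac.computeModel ();

     std::vector<int> inliers;
     ransac.getInliers (inliers);
     pcl::PointCloud<pcl::PointXYZ> inlier_cloud;
     pcl::copyPointCloud (*cloud, inliers, inlier_cloud); // the outlier is dropped
     return 0;
   }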
The code
--------
:align: center
:height: 400px
-.. [Wikipedia] http://en.wikipedia.org/wiki/RANSAC
+.. [WikipediaRANSAC] http://en.wikipedia.org/wiki/RANSAC
.. literalinclude:: sources/region_growing_rgb_segmentation/region_growing_rgb_segmentation.cpp
:language: cpp
- :lines: 34-34
+ :lines: 30-30
This line is responsible for ``pcl::RegionGrowingRGB`` instantiation. This class has two parameters:
.. literalinclude:: sources/region_growing_rgb_segmentation/region_growing_rgb_segmentation.cpp
:language: cpp
- :lines: 35-37
+ :lines: 31-33
These lines provide the instance with the input cloud, indices and search method.
.. literalinclude:: sources/region_growing_rgb_segmentation/region_growing_rgb_segmentation.cpp
:language: cpp
- :lines: 38-38
+ :lines: 34-34
Here the distance threshold is set. It is used to determine whether the point is neighbouring or not. If the point is located at a distance less than
the given threshold, then it is considered to be neighbouring. It is used when searching for neighbouring clusters.
.. literalinclude:: sources/region_growing_rgb_segmentation/region_growing_rgb_segmentation.cpp
:language: cpp
- :lines: 39-39
+ :lines: 35-35
This line sets the color threshold. Just as the angle threshold is used for testing point normals in ``pcl::RegionGrowing``
to determine whether a point belongs to a cluster, this value is used for testing point colors.
.. literalinclude:: sources/region_growing_rgb_segmentation/region_growing_rgb_segmentation.cpp
:language: cpp
- :lines: 40-40
+ :lines: 36-36
Here the color threshold for clusters is set. This value is similar to the previous one, but is used when the merging process takes place.
.. literalinclude:: sources/region_growing_rgb_segmentation/region_growing_rgb_segmentation.cpp
:language: cpp
- :lines: 41-41
+ :lines: 37-37
This value is similar to the one used in the :ref:`region_growing_segmentation` tutorial. In addition to that, it is used for the merging process mentioned at the beginning.
If a cluster has fewer points than the number set through the ``setMinClusterSize`` method, then it will be merged with the nearest neighbour.
.. literalinclude:: sources/region_growing_rgb_segmentation/region_growing_rgb_segmentation.cpp
:language: cpp
- :lines: 43-44
+ :lines: 39-40
Here is the place where the algorithm is launched. It will return the array of clusters when the segmentation process is over.
.. literalinclude:: sources/region_growing_segmentation/region_growing_segmentation.cpp
:language: cpp
- :lines: 30-35
+ :lines: 30-31
-These lines are given only for example. You can safely comment this part. Insofar as ``pcl::RegionGrowing`` is derived from ``pcl::PCLBase``,
-it can work with indices. It means you can instruct that you only segment those points that are listed in the indices array instead of the whole point cloud.
+Insofar as ``pcl::RegionGrowing`` is derived from ``pcl::PCLBase``, it can work with indices. This means you can instruct it to segment only those points that are listed in the indices array instead of the whole point cloud. Here, only the valid points are chosen for segmentation.
.. literalinclude:: sources/region_growing_segmentation/region_growing_segmentation.cpp
:language: cpp
- :lines: 37-39
+ :lines: 33-35
You have finally reached the part where ``pcl::RegionGrowing`` is instantiated. It is a template class that has two parameters:
.. literalinclude:: sources/region_growing_segmentation/region_growing_segmentation.cpp
:language: cpp
- :lines: 40-44
+ :lines: 36-40
The algorithm needs a K nearest neighbour search in its internal structure, so this is where a search method is provided
and the number of neighbours is set. After that it receives the cloud that must be segmented, the point indices and the normals.
.. literalinclude:: sources/region_growing_segmentation/region_growing_segmentation.cpp
:language: cpp
- :lines: 45-46
+ :lines: 41-42
These two lines are the most important part of the algorithm initialization, because they are responsible for the mentioned smoothness constraint.
The first method sets the angle in radians that will be used as the allowable range for the normal deviation.
.. literalinclude:: sources/region_growing_segmentation/region_growing_segmentation.cpp
:language: cpp
- :lines: 48-49
+ :lines: 44-45
This method simply launches the segmentation algorithm. When finished, it will return the array of clusters.
.. literalinclude:: sources/region_growing_segmentation/region_growing_segmentation.cpp
:language: cpp
- :lines: 51-63
+ :lines: 47-59
These lines are simple enough, so they won't be commented. They are intended for those who are not familiar with how to work with ``pcl::PointIndices``
and how to access its elements.
.. literalinclude:: sources/region_growing_segmentation/region_growing_segmentation.cpp
:language: cpp
- :lines: 65-73
+ :lines: 61-69
The ``pcl::RegionGrowing`` class provides a method that returns the colored cloud where each cluster has its own color.
So in this part of code the ``pcl::visualization::CloudViewer`` is instantiated for viewing the result of the segmentation - the same colored cloud.
.. raw:: html
- <iframe title="Smoothing and normal estimation based on polynomial reconstruction" width="480" height="390" src="http://www.youtube.com/embed/FqHroDuo_I8?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Smoothing and normal estimation based on polynomial reconstruction" width="480" height="390" src="https://www.youtube.com/embed/FqHroDuo_I8?rel=0" frameborder="0" allowfullscreen></iframe>
Some of the data irregularities (caused by small distance measurement errors)
.. image:: images/resampling_1.jpg
-On the left side of the figure above, we see the effect or estimating surface
+On the left side of the figure above, we see the effect of estimating surface
normals in a dataset comprised of two registered point clouds together. Due to
alignment errors, the resultant normals are noisy. On the right side we see the
effects of surface normal estimation in the same dataset after it has been
.. raw:: html
- <iframe title="Removing noisy data through resampling" width="480" height="390" src="http://www.youtube.com/embed/N5AgC0KEcw0?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Removing noisy data through resampling" width="480" height="390" src="https://www.youtube.com/embed/N5AgC0KEcw0?rel=0" frameborder="0" allowfullscreen></iframe>
The code
--------
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project("PCL_Tutorials")
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(alignment_prerejective)
#include <Eigen/Core>
+
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/time.h>
#include <pcl/io/pcd_io.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/sample_consensus_prerejective.h>
-#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/visualization/pcl_visualizer.h>
// Types
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(bare_earth)
#include <pcl/segmentation/progressive_morphological_filter.h>
int
-main (int argc, char** argv)
+main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(bspline_fitting)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(cloud_viewer)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(cluster_extraction)
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/features/normal_3d.h>
-#include <pcl/kdtree/kdtree.h>
+#include <pcl/search/kdtree.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
int
-main (int argc, char** argv)
+main ()
{
// Read in the cloud data
pcl::PCDReader reader;
seg.setMaxIterations (100);
seg.setDistanceThreshold (0.02);
- int i=0, nr_points = (int) cloud_filtered->size ();
+ int nr_points = (int) cloud_filtered->size ();
while (cloud_filtered->size () > 0.3 * nr_points)
{
// Segment the largest planar component from the remaining cloud
for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it)
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
- for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit)
- cloud_cluster->push_back ((*cloud_filtered)[*pit]); //*
+ for (const auto& idx : it->indices)
+ cloud_cluster->push_back ((*cloud_filtered)[idx]); //*
cloud_cluster->width = cloud_cluster->size ();
cloud_cluster->height = 1;
cloud_cluster->is_dense = true;
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(concatenate_clouds)
#include <iostream>
-#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
+#include <pcl/common/io.h> // for concatenateFields
int
main (int argc, char** argv)
// Fill in the cloud data
cloud_a.width = 5;
cloud_a.height = cloud_b.height = n_cloud_b.height = 1;
- cloud_a.points.resize (cloud_a.width * cloud_a.height);
+ cloud_a.resize (cloud_a.width * cloud_a.height);
if (strcmp(argv[1], "-p") == 0)
{
cloud_b.width = 3;
- cloud_b.points.resize (cloud_b.width * cloud_b.height);
+ cloud_b.resize (cloud_b.width * cloud_b.height);
}
else{
n_cloud_b.width = 5;
- n_cloud_b.points.resize (n_cloud_b.width * n_cloud_b.height);
+ n_cloud_b.resize (n_cloud_b.width * n_cloud_b.height);
}
for (std::size_t i = 0; i < cloud_a.size (); ++i)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(concatenate_fields)
#include <iostream>
-#include <pcl/io/pcd_io.h>
+#include <pcl/common/io.h> // for concatenateFields
#include <pcl/point_types.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ> cloud_a;
pcl::PointCloud<pcl::Normal> cloud_b;
// Fill in the cloud data
cloud_a.width = cloud_b.width = 5;
cloud_a.height = cloud_b.height = 1;
- cloud_a.points.resize (cloud_a.width * cloud_a.height);
- cloud_b.points.resize (cloud_b.width * cloud_b.height);
+ cloud_a.resize (cloud_a.width * cloud_a.height);
+ cloud_b.resize (cloud_b.width * cloud_b.height);
for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(concatenate_points)
#include <iostream>
-#include <pcl/io/pcd_io.h>
+#include <pcl/point_cloud.h> // for PointCloud
#include <pcl/point_types.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ> cloud_a, cloud_b, cloud_c;
cloud_a.width = 5;
cloud_b.width = 3;
cloud_a.height = cloud_b.height = 1;
- cloud_a.points.resize (cloud_a.width * cloud_a.height);
- cloud_b.points.resize (cloud_b.width * cloud_b.height);
+ cloud_a.resize (cloud_a.width * cloud_a.height);
+ cloud_b.resize (cloud_b.width * cloud_b.height);
for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(concave_hull_2d)
#include <pcl/surface/concave_hull.h>
int
-main (int argc, char** argv)
+main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>),
cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>),
pcl::PCDReader reader;
reader.read ("table_scene_mug_stereo_textured.pcd", *cloud);
- // Build a filter to remove spurious NaNs
+ // Build a filter to remove spurious NaNs and scene background
pcl::PassThrough<pcl::PointXYZ> pass;
pass.setInputCloud (cloud);
pass.setFilterFieldName ("z");
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(conditional_euclidean_clustering)
typedef pcl::PointXYZINormal PointTypeFull;
bool
-enforceIntensitySimilarity (const PointTypeFull& point_a, const PointTypeFull& point_b, float squared_distance)
+enforceIntensitySimilarity (const PointTypeFull& point_a, const PointTypeFull& point_b, float /*squared_distance*/)
{
if (std::abs (point_a.intensity - point_b.intensity) < 5.0f)
return (true);
}
bool
-enforceCurvatureOrIntensitySimilarity (const PointTypeFull& point_a, const PointTypeFull& point_b, float squared_distance)
+enforceNormalOrIntensitySimilarity (const PointTypeFull& point_a, const PointTypeFull& point_b, float /*squared_distance*/)
{
Eigen::Map<const Eigen::Vector3f> point_a_normal = point_a.getNormalVector3fMap (), point_b_normal = point_b.getNormalVector3fMap ();
if (std::abs (point_a.intensity - point_b.intensity) < 5.0f)
return (true);
- if (std::abs (point_a_normal.dot (point_b_normal)) < 0.05)
+ if (std::abs (point_a_normal.dot (point_b_normal)) > std::cos (30.0f / 180.0f * static_cast<float> (M_PI)))
return (true);
return (false);
}
}
int
-main (int argc, char** argv)
+main ()
{
// Data containers used
pcl::PointCloud<PointTypeIO>::Ptr cloud_in (new pcl::PointCloud<PointTypeIO>), cloud_out (new pcl::PointCloud<PointTypeIO>);
std::cerr << ">> Done: " << tt.toc () << " ms\n";
// Using the intensity channel for lazy visualization of the output
- for (int i = 0; i < small_clusters->size (); ++i)
- for (int j = 0; j < (*small_clusters)[i].indices.size (); ++j)
- (*cloud_out)[(*small_clusters)[i].indices[j]].intensity = -2.0;
- for (int i = 0; i < large_clusters->size (); ++i)
- for (int j = 0; j < (*large_clusters)[i].indices.size (); ++j)
- (*cloud_out)[(*large_clusters)[i].indices[j]].intensity = +10.0;
- for (int i = 0; i < clusters->size (); ++i)
+ for (const auto& small_cluster : (*small_clusters))
+ for (const auto& j : small_cluster.indices)
+ (*cloud_out)[j].intensity = -2.0;
+ for (const auto& large_cluster : (*large_clusters))
+ for (const auto& j : large_cluster.indices)
+ (*cloud_out)[j].intensity = +10.0;
+ for (const auto& cluster : (*clusters))
{
int label = rand () % 8;
- for (int j = 0; j < (*clusters)[i].indices.size (); ++j)
- (*cloud_out)[(*clusters)[i].indices[j]].intensity = label;
+ for (const auto& j : cluster.indices)
+ (*cloud_out)[j].intensity = label;
}
// Save the output point cloud
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(conditional_removal)
#include <pcl/filters/conditional_removal.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(convex_hull_2d)
#include <pcl/surface/convex_hull.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>), cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>), cloud_projected (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PCDReader reader;
reader.read ("table_scene_mug_stereo_textured.pcd", *cloud);
- // Build a filter to remove spurious NaNs
+ // Build a filter to remove spurious NaNs and scene background
pcl::PassThrough<pcl::PointXYZ> pass;
pass.setInputCloud (cloud);
pass.setFilterFieldName ("z");
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(cylinder_segmentation)
typedef pcl::PointXYZ PointT;
int
-main (int argc, char** argv)
+main ()
{
// All the objects needed
pcl::PCDReader reader;
reader.read ("table_scene_mug_stereo_textured.pcd", *cloud);
std::cerr << "PointCloud has: " << cloud->size () << " data points." << std::endl;
- // Build a passthrough filter to remove spurious NaNs
+ // Build a passthrough filter to remove spurious NaNs and scene background
pass.setInputCloud (cloud);
pass.setFilterFieldName ("z");
pass.setFilterLimits (0, 1.5);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)\r
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)\r
\r
project(pcl-davidSDK_images_viewer)\r
\r
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(don_segmentation)
for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it, j++)
{
pcl::PointCloud<PointNormal>::Ptr cloud_cluster_don (new pcl::PointCloud<PointNormal>);
- for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit)
+ for (const auto& idx : it->indices)
{
- cloud_cluster_don->points.push_back ((*doncloud)[*pit]);
+ cloud_cluster_don->points.push_back ((*doncloud)[idx]);
}
cloud_cluster_don->width = cloud_cluster_don->size ();
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(pcl-ensenso_cloud_images_viewer)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(extract_indices)
#include <pcl/filters/extract_indices.h>
int
-main (int argc, char** argv)
+main ()
{
pcl::PCLPointCloud2::Ptr cloud_blob (new pcl::PCLPointCloud2), cloud_filtered_blob (new pcl::PCLPointCloud2);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>), cloud_p (new pcl::PointCloud<pcl::PointXYZ>), cloud_f (new pcl::PointCloud<pcl::PointXYZ>);
--- /dev/null
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
+
+project(function_filter)
+
+find_package(PCL 1.11.1.99 REQUIRED)
+
+add_executable (sphere_removal sphere_removal.cpp)
+target_link_libraries (sphere_removal ${PCL_LIBRARIES})
+add_definitions(${PCL_DEFINITIONS})
--- /dev/null
+#include <pcl/common/generate.h>
+#include <pcl/filters/experimental/functor_filter.h>
+#include <pcl/point_types.h>
+
+#include <iostream>
+
+int
+main()
+{
+ using XYZCloud = pcl::PointCloud<pcl::PointXYZ>;
+ const auto cloud = pcl::make_shared<XYZCloud>();
+ const auto filtered_cloud = pcl::make_shared<XYZCloud>();
+
+ // Create a random generator to fill in the cloud
+ pcl::common::CloudGenerator<pcl::PointXYZ, pcl::common::UniformGenerator<float>>
+ generator{{-2.0, 2, 1234}};
+ generator.fill(10, 1, *cloud);
+
+ std::cerr << "Cloud before filtering: " << std::endl;
+ for (const auto& pt : *cloud)
+ std::cerr << " " << pt.x << " " << pt.y << " " << pt.z << std::endl;
+
+ // Setup a condition to reject points inside a filter
+ const Eigen::Vector3f center{0, 0, 2};
+ const float radius = 2;
+
+ pcl::experimental::FilterFunction<pcl::PointXYZ> filter;
+ filter = [=](const XYZCloud& cloud, pcl::index_t idx) {
+ return ((cloud[idx].getVector3fMap() - center).norm() >= radius);
+ };
+
+ // build the filter
+ pcl::experimental::FunctionFilter<pcl::PointXYZ> func_filter(filter);
+ func_filter.setInputCloud(cloud);
+
+ // apply filter
+ func_filter.filter(*filtered_cloud);
+
+ // display pointcloud after filtering
+ std::cerr << "Cloud after filtering: " << std::endl;
+ for (const auto& pt : *filtered_cloud)
+ std::cerr << " " << pt.x << " " << pt.y << " " << pt.z << std::endl;
+
+ return (0);
+}
GoHv.verify ();
GoHv.getMask (hypotheses_mask); // i-element TRUE if hvModels[i] verifies hypotheses
- for (int i = 0; i < hypotheses_mask.size (); i++)
+ for (std::size_t i = 0; i < hypotheses_mask.size (); i++)
{
if (hypotheses_mask[i])
{
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(people_detect)
depth_view_.setPosition (650, 0);
cmap_device_.create(ROWS, COLS);
- cmap_host_.points.resize(COLS * ROWS);
+ cmap_host_.resize(COLS * ROWS);
depth_device_.create(ROWS, COLS);
image_device_.create(ROWS, COLS);
- depth_host_.points.resize(COLS * ROWS);
+ depth_host_.resize(COLS * ROWS);
- rgba_host_.points.resize(COLS * ROWS);
+ rgba_host_.resize(COLS * ROWS);
rgb_host_.resize(COLS * ROWS * 3);
people::uploadColorMap(color_map_);
int c;
cmap_host_.width = cmap_device_.cols();
cmap_host_.height = cmap_device_.rows();
- cmap_host_.points.resize(cmap_host_.width * cmap_host_.height);
+ cmap_host_.resize(cmap_host_.width * cmap_host_.height);
cmap_device_.download(cmap_host_.points, c);
final_view_.showRGBImage<pcl::RGB>(cmap_host_);
{
depth_host_.width = people_detector_.depth_device1_.cols();
depth_host_.height = people_detector_.depth_device1_.rows();
- depth_host_.points.resize(depth_host_.width * depth_host_.height);
+ depth_host_.resize(depth_host_.width * depth_host_.height);
people_detector_.depth_device1_.download(depth_host_.points, c);
}
const unsigned short *data = depth_wrapper->getDepthMetaData().Data();
depth_device_.upload(data, s, h, w);
- depth_host_.points.resize(w *h);
+ depth_host_.resize(w *h);
depth_host_.width = w;
depth_host_.height = h;
std::copy(data, data + w * h, &depth_host_[0]);
image_wrapper->fillRGB(w, h, (unsigned char*)&rgb_host_[0]);
// convert to rgba, TODO image_wrapper should be updated to support rgba directly
- rgba_host_.points.resize(w * h);
+ rgba_host_.resize(w * h);
rgba_host_.width = w;
rgba_host_.height = h;
for(int i = 0; i < rgba_host_.size(); ++i)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(greedy_projection)
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/kdtree/kdtree_flann.h>
+#include <pcl/search/kdtree.h> // for KdTree
#include <pcl/features/normal_3d.h>
#include <pcl/surface/gp3.h>
int
-main (int argc, char** argv)
+main ()
{
// Load input file into a PointCloud<T> with an appropriate type
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(ground_based_rgbd_people_detector)
find_package(PCL 1.7 REQUIRED)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(iccv11)
* Return: A pointer to a point cloud of keypoints
*/
PointCloudPtr
-detectKeypoints (const PointCloudPtr & points, const SurfaceNormalsPtr & normals,
+detectKeypoints (const PointCloudPtr & points, const SurfaceNormalsPtr & /*normals*/,
float min_scale, int nr_octaves, int nr_scales_per_octave, float min_contrast)
{
pcl::SIFTKeypoint<PointT, pcl::PointWithScale> sift_detect;
* The minimum distance between any two random samples
* max_correspondence_distance
* The
- * nr_interations
+ * nr_iterations
* The number of RANSAC iterations to perform
* Return: A transformation matrix that will roughly align the points in source to the points in target
*/
* The "source" points, i.e., the points that must be transformed to align with the target point cloud
* target_points
* The "target" points, i.e., the points to which the source point cloud will be aligned
- * intial_alignment
+ * initial_alignment
* An initial estimate of the transformation matrix that aligns the source points to the target points
* max_correspondence_distance
* A threshold on the distance between any two corresponding points. Any corresponding points that are further
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <boost/filesystem.hpp>
+#include <boost/algorithm/string.hpp> // for split, is_any_of
namespace bf = boost::filesystem;
inline void
//Parse filter parameters
std::string filter_parameters_file;
- pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --filter\n");
+ return (1);
+ }
params_stream.open (filter_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse segmentation parameters
std::string segmentation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --segment\n");
+ return (1);
+ }
params_stream.open (segmentation_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse feature estimation parameters
std::string feature_estimation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --feature\n");
+ return (1);
+ }
params_stream.open (feature_estimation_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse the registration parameters
std::string registration_parameters_file;
- pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --registration\n");
+ return (1);
+ }
params_stream.open (registration_parameters_file.c_str ());
if (params_stream.is_open())
{
//Parse filter parameters
std::string filter_parameters_file;
- pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --filter\n");
+ return (1);
+ }
params_stream.open (filter_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse segmentation parameters
std::string segmentation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --segment\n");
+ return (1);
+ }
params_stream.open (segmentation_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse feature estimation parameters
std::string feature_estimation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --feature\n");
+ return (1);
+ }
params_stream.open (feature_estimation_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse the registration parameters
std::string registration_parameters_file;
- pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --registration\n");
+ return (1);
+ }
params_stream.open (registration_parameters_file.c_str ());
if (params_stream.is_open())
{
visualize_correspondences (const PointCloudPtr points1, const PointCloudPtr keypoints1,
const PointCloudPtr points2, const PointCloudPtr keypoints2,
const std::vector<int> &correspondences,
- const std::vector<float> &correspondence_scores, int max_to_display)
+ const std::vector<float> &correspondence_scores, std::size_t max_to_display)
{
// We want to visualize two clouds side-by-side, so to do this, we'll make copies of the clouds and transform them
// by shifting one to the left and the other to the right. Then we'll draw lines between the corresponding points
OpenNICapture::OpenNICapture (const std::string& device_id)
: grabber_ (device_id)
- , most_recent_frame_ ()
, frame_counter_ (0)
+ , most_recent_frame_ ()
, use_trigger_ (false)
, trigger_ (false)
{
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/histogram_visualizer.h>
+#include <boost/algorithm/string/split.hpp> // for split
int
main (int argc, char ** argv)
//Parse filter parameters
std::string filter_parameters_file;
- pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --filter\n");
+ return (1);
+ }
input_stream.open (filter_parameters_file.c_str ());
if (input_stream.is_open())
{
// Parse segmentation parameters
std::string segmentation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --segment\n");
+ return (1);
+ }
input_stream.open (segmentation_parameters_file.c_str ());
if (input_stream.is_open())
{
// Parse feature estimation parameters
std::string feature_estimation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --feature\n");
+ return (1);
+ }
input_stream.open (feature_estimation_parameters_file.c_str ());
if (input_stream.is_open())
{
// Parse the registration parameters
std::string registration_parameters_file;
- pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --registration\n");
+ return (1);
+ }
input_stream.open (registration_parameters_file.c_str ());
if (input_stream.is_open())
{
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
-
+#include <boost/algorithm/string/split.hpp> // for split
#include "load_clouds.h"
int
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
+#include <boost/algorithm/string/split.hpp> // for split
int
main (int argc, char ** argv)
#include <pcl/features/pfhrgb.h>
#include <pcl/features/3dsc.h>
#include <pcl/features/shot_omp.h>
+#include <pcl/filters/extract_indices.h> // for ExtractIndices
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/kdtree/impl/kdtree_flann.hpp>
#include <pcl/registration/transformation_estimation_svd.h>
std::cout << "correspondence rejection..." << std::flush;
std::vector<std::pair<unsigned, unsigned> > correspondences;
for (unsigned cIdx = 0; cIdx < source2target_.size (); ++cIdx)
- if (target2source_[source2target_[cIdx]] == cIdx)
+ if (static_cast<unsigned int>(target2source_[source2target_[cIdx]]) == cIdx)
correspondences.push_back(std::make_pair(cIdx, source2target_[cIdx]));
correspondences_->resize (correspondences.size());
}
template<typename FeatureType>
-void ICCVTutorial<FeatureType>::keyboard_callback (const pcl::visualization::KeyboardEvent& event, void* cookie)
+void ICCVTutorial<FeatureType>::keyboard_callback (const pcl::visualization::KeyboardEvent& event, void* /*cookie*/)
{
if (event.keyUp())
{
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(implicit_shape_model)
void
keyboardEventOccurred (const pcl::visualization::KeyboardEvent& event,
- void* nothing)
+ void*)
{
if (event.getKeySym () == "space" && event.keyDown ())
next_iteration = true;
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(iros11)
{}
void
- populateDatabase (const std::vector<std::string> & filenames)
+ populateDatabase (const std::vector<std::string> & /*filenames*/)
{
}
const ObjectModel &
- recognizeObject (const PointCloudPtr & query_cloud)
+ recognizeObject (const PointCloudPtr & /*query_cloud*/)
{
int best_match = 0;
return (models_[best_match]);
}
PointCloudPtr
- recognizeAndAlignPoints (const PointCloudPtr & query_cloud)
+ recognizeAndAlignPoints (const PointCloudPtr & /*query_cloud*/)
{
PointCloudPtr output;
return (output);
* The minimum distance between any two random samples
* max_correspondence_distance
* The
- * nr_interations
+ * nr_iterations
* The number of RANSAC iterations to perform
* Return: A transformation matrix that will roughly align the points in source to the points in target
*/
* The "source" points, i.e., the points that must be transformed to align with the target point cloud
* target_points
* The "target" points, i.e., the points to which the source point cloud will be aligned
- * intial_alignment
+ * initial_alignment
* An initial estimate of the transformation matrix that aligns the source points to the target points
* max_correspondence_distance
* A threshold on the distance between any two corresponding points. Any corresponding points that are further
* Return: A pointer to a point cloud of keypoints
*/
PointCloudPtr
-detectKeypoints (const PointCloudPtr & points, const SurfaceNormalsPtr & normals,
+detectKeypoints (const PointCloudPtr & points, const SurfaceNormalsPtr & /*normals*/,
float min_scale, int nr_octaves, int nr_scales_per_octave, float min_contrast)
{
pcl::SIFTKeypoint<PointT, pcl::PointWithScale> sift_detect;
* The minimum distance between any two random samples
* max_correspondence_distance
* The
- * nr_interations
+ * nr_iterations
* The number of RANSAC iterations to perform
* Return: A transformation matrix that will roughly align the points in source to the points in target
*/
* The "source" points, i.e., the points that must be transformed to align with the target point cloud
* target_points
* The "target" points, i.e., the points to which the source point cloud will be aligned
- * intial_alignment
+ * initial_alignment
* An initial estimate of the transformation matrix that aligns the source points to the target points
* max_correspondence_distance
* A threshold on the distance between any two corresponding points. Any corresponding points that are further
using MeshPtr = std::shared_ptr<Mesh>;
PointCloudPtr
-smoothPointCloud (const PointCloudPtr & input, float radius, int polynomial_order)
+smoothPointCloud (const PointCloudPtr & /*input*/, float /*radius*/, int /*polynomial_order*/)
{
PointCloudPtr output (new PointCloud);
return (output);
}
SurfaceElementsPtr
-computeSurfaceElements (const PointCloudPtr & input, float radius, int polynomial_order)
+computeSurfaceElements (const PointCloudPtr & /*input*/, float /*radius*/, int /*polynomial_order*/)
{
SurfaceElementsPtr surfels (new SurfaceElements);
return (surfels);
}
MeshPtr
-computeConvexHull (const PointCloudPtr & input)
+computeConvexHull (const PointCloudPtr & /*input*/)
{
MeshPtr output (new Mesh);
return (output);
MeshPtr
-computeConcaveHull (const PointCloudPtr & input, float alpha)
+computeConcaveHull (const PointCloudPtr & /*input*/, float /*alpha*/)
{
MeshPtr output (new Mesh);
return (output);
}
pcl::PolygonMesh::Ptr
-greedyTriangulation (const SurfaceElementsPtr & surfels, float radius, float mu, int max_nearest_neighbors,
- float max_surface_angle, float min_angle, float max_angle)
+greedyTriangulation (const SurfaceElementsPtr & /*surfels*/, float /*radius*/, float /*mu*/, int /*max_nearest_neighbors*/,
+ float /*max_surface_angle*/, float /*min_angle*/, float /*max_angle*/)
{
pcl::PolygonMesh::Ptr output (new pcl::PolygonMesh);
pcl::PolygonMesh::Ptr
-marchingCubesTriangulation (const SurfaceElementsPtr & surfels, float leaf_size, float iso_level)
+marchingCubesTriangulation (const SurfaceElementsPtr & /*surfels*/, float /*leaf_size*/, float /*iso_level*/)
{
pcl::PolygonMesh::Ptr output (new pcl::PolygonMesh);
return (output);
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <boost/filesystem.hpp>
+#include <boost/algorithm/string.hpp> // for split, is_any_of
namespace bf = boost::filesystem;
inline void
//Parse filter parameters
std::string filter_parameters_file;
- pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --filter\n");
+ return (1);
+ }
params_stream.open (filter_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse segmentation parameters
std::string segmentation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --segment\n");
+ return (1);
+ }
params_stream.open (segmentation_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse feature estimation parameters
std::string feature_estimation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --feature\n");
+ return (1);
+ }
params_stream.open (feature_estimation_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse the registration parameters
std::string registration_parameters_file;
- pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --registration\n");
+ return (1);
+ }
params_stream.open (registration_parameters_file.c_str ());
if (params_stream.is_open())
{
//Parse filter parameters
std::string filter_parameters_file;
- pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --filter\n");
+ return (1);
+ }
params_stream.open (filter_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse segmentation parameters
std::string segmentation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --segment\n");
+ return (1);
+ }
params_stream.open (segmentation_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse feature estimation parameters
std::string feature_estimation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --feature\n");
+ return (1);
+ }
params_stream.open (feature_estimation_parameters_file.c_str ());
if (params_stream.is_open())
{
// Parse the registration parameters
std::string registration_parameters_file;
- pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --registration\n");
+ return (1);
+ }
params_stream.open (registration_parameters_file.c_str ());
if (params_stream.is_open())
{
visualize_correspondences (const PointCloudPtr points1, const PointCloudPtr keypoints1,
const PointCloudPtr points2, const PointCloudPtr keypoints2,
const std::vector<int> &correspondences,
- const std::vector<float> &correspondence_scores, int max_to_display)
+ const std::vector<float> &correspondence_scores, std::size_t max_to_display)
{
// We want to visualize two clouds side-by-side, so to do this, we'll make copies of the clouds and transform them
// by shifting one to the left and the other to the right. Then we'll draw lines between the corresponding points
OpenNICapture::OpenNICapture (const std::string& device_id)
: grabber_ (device_id)
- , most_recent_frame_ ()
+ , preview_ ()
, frame_counter_ (0)
+ , most_recent_frame_ ()
, use_trigger_ (false)
, trigger_ (false)
- , preview_ ()
{
// Register a callback function to our OpenNI grabber...
std::function<void (const PointCloudConstPtr&)> frame_cb = [this] (const PointCloudConstPtr& cloud) { onNewFrame (cloud); };
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/histogram_visualizer.h>
+#include <boost/algorithm/string/split.hpp> // for split
int
main (int argc, char ** argv)
//Parse filter parameters
std::string filter_parameters_file;
- pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--filter", filter_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --filter\n");
+ return (1);
+ }
input_stream.open (filter_parameters_file.c_str ());
if (input_stream.is_open())
{
// Parse segmentation parameters
std::string segmentation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--segment", segmentation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --segment\n");
+ return (1);
+ }
input_stream.open (segmentation_parameters_file.c_str ());
if (input_stream.is_open())
{
// Parse feature estimation parameters
std::string feature_estimation_parameters_file;
- pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--feature", feature_estimation_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --feature\n");
+ return (1);
+ }
input_stream.open (feature_estimation_parameters_file.c_str ());
if (input_stream.is_open())
{
// Parse the registration parameters
std::string registration_parameters_file;
- pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) > 0;
+ if (pcl::console::parse_argument (argc, argv, "--registration", registration_parameters_file) < 0)
+ {
+ pcl::console::print_error ("Missing option --registration\n");
+ return (1);
+ }
input_stream.open (registration_parameters_file.c_str ());
if (input_stream.is_open())
{
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
-
+#include <boost/algorithm/string/split.hpp> // for split
#include "load_clouds.h"
int
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
+#include <boost/algorithm/string/split.hpp> // for split
int
main (int argc, char ** argv)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(iterative_closest_point)
#include <pcl/registration/icp.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in (new pcl::PointCloud<pcl::PointXYZ>(5,1));
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(kdtree_search)
#include <ctime>
int
-main (int argc, char** argv)
+main ()
{
srand (time (NULL));
int K = 10;
- std::vector<int> pointIdxNKNSearch(K);
- std::vector<float> pointNKNSquaredDistance(K);
+ std::vector<int> pointIdxKNNSearch(K);
+ std::vector<float> pointKNNSquaredDistance(K);
std::cout << "K nearest neighbor search at (" << searchPoint.x
<< " " << searchPoint.y
<< " " << searchPoint.z
<< ") with K=" << K << std::endl;
- if ( kdtree.nearestKSearch (searchPoint, K, pointIdxNKNSearch, pointNKNSquaredDistance) > 0 )
+ if ( kdtree.nearestKSearch (searchPoint, K, pointIdxKNNSearch, pointKNNSquaredDistance) > 0 )
{
- for (std::size_t i = 0; i < pointIdxNKNSearch.size (); ++i)
- std::cout << " " << (*cloud)[ pointIdxNKNSearch[i] ].x
- << " " << (*cloud)[ pointIdxNKNSearch[i] ].y
- << " " << (*cloud)[ pointIdxNKNSearch[i] ].z
- << " (squared distance: " << pointNKNSquaredDistance[i] << ")" << std::endl;
+ for (std::size_t i = 0; i < pointIdxKNNSearch.size (); ++i)
+ std::cout << " " << (*cloud)[ pointIdxKNNSearch[i] ].x
+ << " " << (*cloud)[ pointIdxKNNSearch[i] ].y
+ << " " << (*cloud)[ pointIdxKNNSearch[i] ].z
+ << " (squared distance: " << pointKNNSquaredDistance[i] << ")" << std::endl;
}
// Neighbors within radius search
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(min_cut_segmentation)
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/cloud_viewer.h>
-#include <pcl/filters/passthrough.h>
+#include <pcl/filters/filter_indices.h> // for pcl::removeNaNFromPointCloud
#include <pcl/segmentation/min_cut_segmentation.h>
-int main (int argc, char** argv)
+int main ()
{
pcl::PointCloud <pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud <pcl::PointXYZ>);
if ( pcl::io::loadPCDFile <pcl::PointXYZ> ("min_cut_segmentation_tutorial.pcd", *cloud) == -1 )
}
pcl::IndicesPtr indices (new std::vector <int>);
- pcl::PassThrough<pcl::PointXYZ> pass;
- pass.setInputCloud (cloud);
- pass.setFilterFieldName ("z");
- pass.setFilterLimits (0.0, 1.0);
- pass.filter (*indices);
+ pcl::removeNaNFromPointCloud(*cloud, *indices);
pcl::MinCutSegmentation<pcl::PointXYZ> seg;
seg.setInputCloud (cloud);
}
return (0);
-}
\ No newline at end of file
+}
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(model_outlier_removal)
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_sphere_filtered (new pcl::PointCloud<pcl::PointXYZ>);
// 1. Generate cloud data
- int noise_size = 5;
- int sphere_data_size = 10;
+ std::size_t noise_size = 5;
+ std::size_t sphere_data_size = 10;
cloud->width = noise_size + sphere_data_size;
cloud->height = 1;
cloud->points.resize (cloud->width * cloud->height);
// 1.2 Add sphere:
double rand_x1 = 1;
double rand_x2 = 1;
- for (std::size_t i = noise_size; i < noise_size + sphere_data_size; ++i)
+ for (std::size_t i = noise_size; i < (noise_size + sphere_data_size); ++i)
{
// See: http://mathworld.wolfram.com/SpherePointPicking.html
while (pow (rand_x1, 2) + pow (rand_x2, 2) >= 1)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(moment_of_inertia)
std::cout << "Now extracting NARFs in every image point.\n";
std::vector<std::vector<pcl::Narf*> > narfs;
narfs.resize (range_image.size ());
- int last_percentage=-1;
+ unsigned int last_percentage=0;
for (unsigned int y=0; y<range_image.height; ++y)
{
for (unsigned int x=0; x<range_image.width; ++x)
float surface_patch_world_size = narf.getSurfacePatchWorldSize ();
surface_patch_widget.showFloatImage (narf.getSurfacePatch (), surface_patch_pixel_size, surface_patch_pixel_size,
-0.5f*surface_patch_world_size, 0.5f*surface_patch_world_size, true);
- float surface_patch_rotation = narf.getSurfacePatchRotation ();
+ /*float surface_patch_rotation = narf.getSurfacePatchRotation ();
float patch_middle = 0.5f* (float (surface_patch_pixel_size-1));
float angle_step_size = pcl::deg2rad (360.0f)/narf.getDescriptorSize ();
float cell_size = surface_patch_world_size/float (surface_patch_pixel_size),
{
//surface_patch_widget.markLine (radius-0.5, radius-0.5, radius-0.5f + 2.0f*radius*sinf (rotations[i]),
//radius-0.5f - 2.0f*radius*std::cos (rotations[i]), pcl::visualization::Vector3ub (255,0,0));
- }
+ }*/
descriptor_widget.showFloatImage (narf.getDescriptor (), narf.getDescriptorSize (), 1, -0.1f, 0.3f, true);
for (float y=-0.5f; y<=0.5f; y+=0.01f)
{
PointType point; point.x = x; point.y = y; point.z = 2.0f - y;
- point_cloud.points.push_back (point);
+ point_cloud.push_back (point);
}
}
point_cloud.width = point_cloud.size (); point_cloud.height = 1;
for (float y=-0.5f; y<=0.5f; y+=0.01f)
{
PointType point; point.x = x; point.y = y; point.z = 2.0f - y;
- point_cloud.points.push_back (point);
+ point_cloud.push_back (point);
}
}
point_cloud.width = point_cloud.size (); point_cloud.height = 1;
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(normal_distributions_transform)
using namespace std::chrono_literals;
int
-main (int argc, char** argv)
+main ()
{
// Loading first scan of room.
pcl::PointCloud<pcl::PointXYZ>::Ptr target_cloud (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(octree_change_detection)
#include <ctime>
int
-main (int argc, char** argv)
+main ()
{
srand ((unsigned int) time (NULL));
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(octree_search)
#include <ctime>
int
-main (int argc, char** argv)
+main ()
{
srand ((unsigned int) time (NULL));
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(openni_grabber)
#include <iostream>
#include <pcl/io/openni_grabber.h>
#include <pcl/range_image/range_image_planar.h>
-#include <pcl/common/common_headers.h>
+#include <pcl/common/time.h> // for pcl::getTime
#include <pcl/visualization/range_image_visualizer.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/features/range_image_border_extractor.h>
int width = depth_image_ptr->getWidth (), height = depth_image_ptr->getHeight ();
float center_x = width/2, center_y = height/2;
float focal_length_x = depth_image_ptr->getFocalLength (), focal_length_y = focal_length_x;
- float original_angular_resolution = asinf (0.5f*float (width)/float (focal_length_x)) / (0.5f*float (width));
+ // float original_angular_resolution = asinf (0.5f*float (width)/float (focal_length_x)) / (0.5f*float (width));
float desired_angular_resolution = angular_resolution;
range_image_planar.setDepthImage (depth_map, width, height, center_x, center_y,
focal_length_x, focal_length_y, desired_angular_resolution);
depth_image_mutex.unlock ();
- got_new_range_image = !range_image_planar.points.empty ();
+ got_new_range_image = !range_image_planar.empty ();
}
if (!got_new_range_image)
#include <iostream>
#include <pcl/io/openni_grabber.h>
#include <pcl/range_image/range_image_planar.h>
-#include <pcl/common/common_headers.h>
+#include <pcl/common/angles.h> // for pcl::deg2rad
#include <pcl/visualization/range_image_visualizer.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/parse.h>
int width = depth_image_ptr->getWidth (), height = depth_image_ptr->getHeight ();
float center_x = width/2, center_y = height/2;
float focal_length_x = depth_image_ptr->getFocalLength (), focal_length_y = focal_length_x;
- float original_angular_resolution = asinf (0.5f*float (width)/float (focal_length_x)) / (0.5f*float (width));
+ // float original_angular_resolution = asinf (0.5f*float (width)/float (focal_length_x)) / (0.5f*float (width));
float desired_angular_resolution = angular_resolution;
range_image_planar.setDepthImage (depth_map, width, height, center_x, center_y,
focal_length_x, focal_length_y, desired_angular_resolution);
depth_image_mutex.unlock ();
- got_new_range_image = !range_image_planar.points.empty ();
+ got_new_range_image = !range_image_planar.empty ();
}
if (!got_new_range_image)
PointCloudColorHandlerGenericField<PointNormalT> tgt_color_handler (cloud_target, "curvature");
if (!tgt_color_handler.isCapable ())
- PCL_WARN ("Cannot create curvature color handler!");
+ PCL_WARN ("Cannot create curvature color handler!\n");
PointCloudColorHandlerGenericField<PointNormalT> src_color_handler (cloud_source, "curvature");
if (!src_color_handler.isCapable ())
- PCL_WARN ("Cannot create curvature color handler!");
+ PCL_WARN ("Cannot create curvature color handler!\n");
p->addPointCloud (cloud_target, tgt_color_handler, "target", vp_2);
// Check user input
if (data.empty ())
{
- PCL_ERROR ("Syntax is: %s <source.pcd> <target.pcd> [*]", argv[0]);
- PCL_ERROR ("[*] - multiple files can be added. The registration results of (i, i+1) will be registered against (i+2), etc");
+ PCL_ERROR ("Syntax is: %s <source.pcd> <target.pcd> [*]\n", argv[0]);
+ PCL_ERROR ("[*] - multiple files can be added. The registration results of (i, i+1) will be registered against (i+2), etc\n");
return (-1);
}
- PCL_INFO ("Loaded %d datasets.", (int)data.size ());
+ PCL_INFO ("Loaded %d datasets.\n", (int)data.size ());
// Create a PCLVisualizer object
p = new pcl::visualization::PCLVisualizer (argc, argv, "Pairwise Incremental Registration example");
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(passthrough)
#include <pcl/filters/passthrough.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(pcd_read)
#include <pcl/point_types.h>
int
-main (int argc, char** argv)
+main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(pcd_write)
#include <pcl/point_types.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ> cloud;
cloud.width = 5;
cloud.height = 1;
cloud.is_dense = false;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (auto& point: cloud)
{
#include <pcl/visualization/pcl_painter2D.h>
//----------------------------------------------------------------------------
-int main (int argc, char * argv [])
+int main ()
{
pcl::visualization::PCLPainter2D *painter = new pcl::visualization::PCLPainter2D();
return 0;
-}
\ No newline at end of file
+}
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(pcl_plotter)
find_package(PCL 1.7 REQUIRED COMPONENTS common visualization)
include_directories(${PCL_INCLUDE_DIRS})
//............................................................................
int
-main (int argc, char * argv [])
+main ()
{
//defining a plotter
PCLPlotter *plotter = new PCLPlotter ("My Plotter");
#include <iostream>
#include <thread>
-#include <pcl/common/common_headers.h>
+#include <pcl/common/angles.h> // for pcl::deg2rad
#include <pcl/features/normal_3d.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(planar_segmentation)
#include <pcl/segmentation/sac_segmentation.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
if (inliers->indices.size () == 0)
{
- PCL_ERROR ("Could not estimate a planar model for the given dataset.");
+ PCL_ERROR ("Could not estimate a planar model for the given dataset.\n");
return (-1);
}
<< coefficients->values[3] << std::endl;
std::cerr << "Model inliers: " << inliers->indices.size () << std::endl;
- for (std::size_t i = 0; i < inliers->indices.size (); ++i)
for (const auto& idx: inliers->indices)
std::cerr << idx << " " << cloud->points[idx].x << " "
<< cloud->points[idx].y << " "
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(point_cloud_compression)
};
int
-main (int argc, char **argv)
+main ()
{
SimpleOpenNIViewer v;
v.run ();
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(project_inliers)
#include <iostream>
-#include <pcl/io/pcd_io.h>
+#include <pcl/point_cloud.h> // for PointCloud
#include <pcl/point_types.h>
#include <pcl/ModelCoefficients.h>
#include <pcl/filters/project_inliers.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_projected (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8.11)
+cmake_minimum_required(VERSION 3.5)
project(pcl_colorize_cloud)
PCLViewer::PCLViewer (QWidget *parent) :
QMainWindow (parent),
- ui (new Ui::PCLViewer),
filtering_axis_ (1), // = y
- color_mode_ (4) // = Rainbow
+ color_mode_ (4), // = Rainbow
+ ui (new Ui::PCLViewer)
{
ui->setupUi (this);
this->setWindowTitle ("PCL viewer");
-cmake_minimum_required(VERSION 2.8.11)
+cmake_minimum_required(VERSION 3.5)
project(pcl_visualizer)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(radius_outlier_removal)
#include <pcl/filters/radius_outlier_removal.h>
int
- main (int argc, char** argv)
+ main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(random_sample_consensus)
#include <thread>
#include <pcl/console/parse.h>
-#include <pcl/filters/extract_indices.h>
-#include <pcl/io/pcd_io.h>
+#include <pcl/point_cloud.h> // for PointCloud
+#include <pcl/common/io.h> // for copyPointCloud
#include <pcl/point_types.h>
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_plane.h>
cloud->height = 1;
cloud->is_dense = false;
cloud->points.resize (cloud->width * cloud->height);
- for (pcl::index_t i = 0; i < cloud->size (); ++i)
+ for (pcl::index_t i = 0; i < static_cast<pcl::index_t>(cloud->size ()); ++i)
{
if (pcl::console::find_argument (argc, argv, "-s") >= 0 || pcl::console::find_argument (argc, argv, "-sf") >= 0)
{
for (float y=-0.5f; y<=0.5f; y+=0.01f)
{
PointType point; point.x = x; point.y = y; point.z = 2.0f - y;
- point_cloud.points.push_back (point);
+ point_cloud.push_back (point);
}
}
point_cloud.width = point_cloud.size (); point_cloud.height = 1;
for (int x=0; x< (int)range_image.width; ++x)
{
if (border_descriptions[y*range_image.width + x].traits[pcl::BORDER_TRAIT__OBSTACLE_BORDER])
- border_points.points.push_back (range_image[y*range_image.width + x]);
+ border_points.push_back (range_image[y*range_image.width + x]);
if (border_descriptions[y*range_image.width + x].traits[pcl::BORDER_TRAIT__VEIL_POINT])
- veil_points.points.push_back (range_image[y*range_image.width + x]);
+ veil_points.push_back (range_image[y*range_image.width + x]);
if (border_descriptions[y*range_image.width + x].traits[pcl::BORDER_TRAIT__SHADOW_BORDER])
- shadow_points.points.push_back (range_image[y*range_image.width + x]);
+ shadow_points.push_back (range_image[y*range_image.width + x]);
}
}
pcl::visualization::PointCloudColorHandlerCustom<pcl::PointWithRange> border_points_color_handler (border_points_ptr, 0, 255, 0);
#include <pcl/range_image/range_image.h>
-int main (int argc, char** argv) {
+int main () {
pcl::PointCloud<pcl::PointXYZ> pointCloud;
// Generate the data
point.x = 2.0f - y;
point.y = y;
point.z = z;
- pointCloud.points.push_back(point);
+ pointCloud.push_back(point);
}
}
pointCloud.width = pointCloud.size();
#include <iostream>
-#include <pcl/common/common_headers.h>
+
#include <pcl/range_image/range_image.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/range_image_visualizer.h>
for (float y=-0.5f; y<=0.5f; y+=0.01f)
{
PointType point; point.x = x; point.y = y; point.z = 2.0f - y;
- point_cloud.points.push_back (point);
+ point_cloud.push_back (point);
}
}
point_cloud.width = point_cloud.size (); point_cloud.height = 1;
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(region_growing_rgb_segmentation)
#include <pcl/search/search.h>
#include <pcl/search/kdtree.h>
#include <pcl/visualization/cloud_viewer.h>
-#include <pcl/filters/passthrough.h>
+#include <pcl/filters/filter_indices.h> // for pcl::removeNaNFromPointCloud
#include <pcl/segmentation/region_growing_rgb.h>
using namespace std::chrono_literals;
int
-main (int argc, char** argv)
+main ()
{
pcl::search::Search <pcl::PointXYZRGB>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZRGB>);
}
pcl::IndicesPtr indices (new std::vector <int>);
- pcl::PassThrough<pcl::PointXYZRGB> pass;
- pass.setInputCloud (cloud);
- pass.setFilterFieldName ("z");
- pass.setFilterLimits (0.0, 1.0);
- pass.filter (*indices);
+ pcl::removeNaNFromPointCloud (*cloud, *indices);
pcl::RegionGrowingRGB<pcl::PointXYZRGB> reg;
reg.setInputCloud (cloud);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(region_growing_segmentation)
#include <pcl/search/kdtree.h>
#include <pcl/features/normal_3d.h>
#include <pcl/visualization/cloud_viewer.h>
-#include <pcl/filters/passthrough.h>
+#include <pcl/filters/filter_indices.h> // for pcl::removeNaNFromPointCloud
#include <pcl/segmentation/region_growing.h>
int
-main (int argc, char** argv)
+main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
if ( pcl::io::loadPCDFile <pcl::PointXYZ> ("region_growing_tutorial.pcd", *cloud) == -1)
normal_estimator.compute (*normals);
pcl::IndicesPtr indices (new std::vector <int>);
- pcl::PassThrough<pcl::PointXYZ> pass;
- pass.setInputCloud (cloud);
- pass.setFilterFieldName ("z");
- pass.setFilterLimits (0.0, 1.0);
- pass.filter (*indices);
+ pcl::removeNaNFromPointCloud(*cloud, *indices);
pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> reg;
reg.setMinClusterSize (50);
reg.setSearchMethod (tree);
reg.setNumberOfNeighbours (30);
reg.setInputCloud (cloud);
- //reg.setIndices (indices);
+ reg.setIndices (indices);
reg.setInputNormals (normals);
reg.setSmoothnessThreshold (3.0 / 180.0 * M_PI);
reg.setCurvatureThreshold (1.0);
std::cout << "First cluster has " << clusters[0].indices.size () << " points." << std::endl;
std::cout << "These are the indices of the points of the initial" <<
std::endl << "cloud that belong to the first cluster:" << std::endl;
- int counter = 0;
+ std::size_t counter = 0;
while (counter < clusters[0].indices.size ())
{
std::cout << clusters[0].indices[counter] << ", ";
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(registration_api)
// Reject correspondences based on their XYZ distance
rejectBadCorrespondences (all_correspondences, keypoints_src, keypoints_tgt, *good_correspondences);
- for (int i = 0; i < good_correspondences->size (); ++i)
- std::cerr << good_correspondences->at (i) << std::endl;
+ for (const auto& corr : (*good_correspondences))
+ std::cerr << corr << std::endl;
// Obtain the best transformation between the two sets of keypoints given the remaining correspondences
TransformationEstimationSVD<PointXYZ, PointXYZ> trans_est;
trans_est.estimateRigidTransformation (*keypoints_src, *keypoints_tgt, *good_correspondences, transform);
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(remove_outliers)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(resampling)
#include <pcl/surface/mls.h>
int
-main (int argc, char** argv)
+main ()
{
// Load input file into a PointCloud<T> with an appropriate type
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ> ());
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(rops_feature)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(statistical_removal)
#include <pcl/filters/statistical_outlier_removal.h>
int
-main (int argc, char** argv)
+main ()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
writer.write<pcl::PointXYZ> ("table_scene_lms400_outliers.pcd", *cloud_filtered, false);
return (0);
-}
\ No newline at end of file
+}
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(stick_segmentation)
#include <pcl/console/time.h>
#include <pcl/point_types.h>
#include <pcl/memory.h>
+#include <pcl/point_cloud.h> // for PointCloud
#include <pcl/io/pcd_io.h>
-#include <pcl/filters/passthrough.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/segmentation/sac_segmentation.h>
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(supervoxel_clustering)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(template_alignment)
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(openni_tracking)
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/openni_grabber.h>
-#include <pcl/console/parse.h>
-#include <pcl/common/time.h>
#include <pcl/common/centroid.h>
+#include <pcl/common/transforms.h> // for transformPointCloud
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/io/pcd_io.h>
#include <pcl/filters/passthrough.h>
-#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/approximate_voxel_grid.h>
-#include <pcl/sample_consensus/method_types.h>
-#include <pcl/sample_consensus/model_types.h>
-
-#include <pcl/search/pcl_search.h>
-#include <pcl/common/transforms.h>
-
#include <pcl/tracking/tracking.h>
#include <pcl/tracking/particle_filter.h>
#include <pcl/tracking/kld_adaptive_particle_filter_omp.h>
#include <pcl/tracking/particle_filter_omp.h>
#include <pcl/tracking/coherence.h>
#include <pcl/tracking/distance_coherence.h>
-#include <pcl/tracking/hsv_color_coherence.h>
#include <pcl/tracking/approx_nearest_pair_point_cloud_coherence.h>
#include <pcl/tracking/nearest_pair_point_cloud_coherence.h>
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(vfh_cluster_classifier)
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
-#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/io/pcd_io.h>
#include <boost/filesystem.hpp>
#include <flann/io/hdf5.h>
#include <fstream>
+
typedef std::pair<std::string, std::vector<float> > vfh_model;
/** \brief Loads an n-D histogram file as a VFH signature
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/common.h>
-#include <pcl/common/transforms.h>
+#include <pcl/common/centroid.h> // for compute3DCentroid
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <flann/flann.h>
#include <flann/io/hdf5.h>
#include <boost/filesystem.hpp>
-
+#include <boost/algorithm/string/replace.hpp> // for replace_last
typedef std::pair<std::string, std::vector<float> > vfh_model;
/** \brief Loads an n-D histogram file as a VFH signature
-cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(voxel_grid)
#include <pcl/filters/voxel_grid.h>
int
-main (int argc, char** argv)
+main ()
{
pcl::PCLPointCloud2::Ptr cloud (new pcl::PCLPointCloud2 ());
pcl::PCLPointCloud2::Ptr cloud_filtered (new pcl::PCLPointCloud2 ());
.. raw:: html
- <iframe title="Removing outliers using a StatisticalOutlierRemoval filter" width="480" height="390" src="http://www.youtube.com/embed/RjQPp2_GRnI?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Removing outliers using a StatisticalOutlierRemoval filter" width="480" height="390" src="https://www.youtube.com/embed/RjQPp2_GRnI?rel=0" frameborder="0" allowfullscreen></iframe>
Background
----------
.. raw:: html
- <iframe width="560" height="349" style="margin-left:50px" src="http://www.youtube.com/embed/1T5HxTTgE4I" frameborder="0" allowfullscreen></iframe>
+ <iframe width="560" height="349" style="margin-left:50px" src="https://www.youtube.com/embed/1T5HxTTgE4I" frameborder="0" allowfullscreen></iframe>
We can use the code below to fit a template of a person's face (the blue points) to a new point cloud (the green points).
----------------------------
This tutorial explains 6D object tracking and shows example code (tracking_sample.cpp) that uses the pcl::tracking library. By running this example code, you can watch the tracker follow the target object even while you move the tracked object or your sensor device. In the example, you first initialize the tracker and pass it the target object's point cloud so that the tracker knows what to track. Therefore, before starting this tutorial, you need to prepare a segmented model of the object as a PCD file. Once the model is set, the tracker starts tracking the object.
-Following figure shows how looks like when trakcing works successfully.
+The following figure shows what it looks like when tracking works successfully.
.. figure:: images/tracking/mergePicture.png
:height: 600
.. literalinclude:: sources/tracking/tracking_sample.cpp
:language: cpp
- :lines: 227-242
+ :lines: 219-234
First, in main function, these lines set the parameters for tracking.
.. literalinclude:: sources/tracking/tracking_sample.cpp
:language: cpp
- :lines: 246-257
+ :lines: 237-249
Here, we set the likelihood functions that the tracker uses when calculating weights. You can add as many likelihood functions as you like; by default, normal and color likelihood functions are provided. When you want to add another likelihood function, all you have to do is initialize a new Coherence class and add the instance to the coherence variable with the addPointCoherence function.
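As a rough illustration of that pattern (a sketch only, not an excerpt from tracking_sample.cpp — it assumes the ``RefPointType`` alias and ``tracker_`` instance from the sample, ``using namespace pcl::tracking``, and an arbitrary octree resolution):

.. code-block:: cpp

   #include <pcl/tracking/approx_nearest_pair_point_cloud_coherence.h>
   #include <pcl/tracking/distance_coherence.h>
   #include <pcl/search/octree.h>

   // Container coherence that combines the individual likelihood terms
   ApproxNearestPairPointCloudCoherence<RefPointType>::Ptr coherence
     (new ApproxNearestPairPointCloudCoherence<RefPointType>);

   // Distance-based likelihood; further coherences are added the same way
   DistanceCoherence<RefPointType>::Ptr distance_coherence
     (new DistanceCoherence<RefPointType>);
   coherence->addPointCoherence (distance_coherence);

   // Search structure used to match input points against the reference model
   pcl::search::Octree<RefPointType>::Ptr search (new pcl::search::Octree<RefPointType> (0.01));
   coherence->setSearchMethod (search);
   coherence->setMaximumDistance (0.01);

   tracker_->setCloudCoherence (coherence);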
.. literalinclude:: sources/tracking/tracking_sample.cpp
:language: cpp
- :lines: 259-272
+ :lines: 251-264
In this part, we set the point cloud loaded from the PCD file as the reference model for the tracker and also set the model's transform values.
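A condensed sketch of that step (names such as ``transed_ref`` are placeholders; ``RefPointType``, ``target_cloud`` and ``tracker_`` are assumed from the sample, and the translation value is arbitrary):

.. code-block:: cpp

   // Pose offset between the model's own frame and the tracked frame
   Eigen::Affine3f trans = Eigen::Affine3f::Identity ();
   trans.translate (Eigen::Vector3f (0.0f, 0.0f, 0.0f));

   // Move the loaded model into its own reference frame before handing it over
   pcl::PointCloud<RefPointType>::Ptr transed_ref (new pcl::PointCloud<RefPointType>);
   pcl::transformPointCloud (*target_cloud, *transed_ref, trans.inverse ());

   tracker_->setReferenceCloud (transed_ref);
   tracker_->setTrans (trans);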
.. literalinclude:: sources/tracking/tracking_sample.cpp
:language: cpp
- :lines: 173-180
+ :lines: 165-172
Until the counter variable reaches 10, we ignore the input point cloud, because the first few frames often contain noise. Once the counter has reached 10, on each loop we pass the downsampled input point cloud to the tracker, and the tracker computes the particles' movement.
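In code form that per-frame step looks roughly like this (a sketch, assuming the ``counter``, ``cloud_pass_downsampled_`` and ``tracker_`` variables from the sample):

.. code-block:: cpp

   // The first frames are typically noisy, so skip them before tracking starts
   if (counter < 10)
   {
     counter++;
   }
   else
   {
     // Feed the downsampled frame to the tracker and update the particles
     tracker_->setInputCloud (cloud_pass_downsampled_);
     tracker_->compute ();
   }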
.. literalinclude:: sources/tracking/tracking_sample.cpp
:language: cpp
- :lines: 82-82
+ :lines: 74-74
In the drawParticles function, you can get the particles' positions by calling getParticles().
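For instance, the particle positions can be copied into a small cloud for display like this (a sketch; ``ParticleFilter`` is the tracker alias assumed from the sample and ``particle_cloud`` is our own name):

.. code-block:: cpp

   // Each particle is a pose hypothesis; keep only its position for drawing
   ParticleFilter::PointCloudStatePtr particles = tracker_->getParticles ();
   pcl::PointCloud<pcl::PointXYZ>::Ptr particle_cloud (new pcl::PointCloud<pcl::PointXYZ>);
   for (const auto& particle : *particles)
     particle_cloud->push_back (pcl::PointXYZ (particle.x, particle.y, particle.z));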
.. literalinclude:: sources/tracking/tracking_sample.cpp
:language: cpp
- :lines: 116-117
+ :lines: 108-109
.. raw:: html
- <iframe width="420" height="315" src="http://www.youtube.com/embed/AjjSZufyprU" frameborder="0" allowfullscreen></iframe>
+ <iframe width="420" height="315" src="https://www.youtube.com/embed/AjjSZufyprU" frameborder="0" allowfullscreen></iframe>
You may be wondering: *"What is the difference between a TSDF cloud and a normal point cloud?"* Well, a TSDF cloud *is* a point cloud. However, the TSDF cloud makes use of how the data is stored within the GPU at KinFu runtime.
.. raw:: html
- <iframe width="420" height="315" src="http://www.youtube.com/embed/rF1N-EEIJao" frameborder="0" allowfullscreen></iframe>
+ <iframe width="420" height="315" src="https://www.youtube.com/embed/rF1N-EEIJao" frameborder="0" allowfullscreen></iframe>
The next part of this tutorial will demonstrate how to get a mesh from the TSDF cloud.
.. raw:: html
- <iframe width="420" height="315" src="http://www.youtube.com/embed/XMJ-ikSZAOE" frameborder="0" allowfullscreen></iframe>
+ <iframe width="420" height="315" src="https://www.youtube.com/embed/XMJ-ikSZAOE" frameborder="0" allowfullscreen></iframe>
The next part of this tutorial will demonstrate how to generate the texture for the mesh we have just created.
.. raw:: html
- <iframe width="420" height="315" src="http://www.youtube.com/embed/7S7Jj-4cKHs" frameborder="0" allowfullscreen></iframe>
+ <iframe width="420" height="315" src="https://www.youtube.com/embed/7S7Jj-4cKHs" frameborder="0" allowfullscreen></iframe>
Output
-------
.. raw:: html
- <iframe title="Downsampling a PointCloud using a VoxelGrid filter" width="480" height="390" src="http://www.youtube.com/embed/YHR6_OIxtFI?rel=0" frameborder="0" allowfullscreen></iframe>
+ <iframe title="Downsampling a PointCloud using a VoxelGrid filter" width="480" height="390" src="https://www.youtube.com/embed/YHR6_OIxtFI?rel=0" frameborder="0" allowfullscreen></iframe>
The code
--------
**Background**
- The *sample_consensus* library holds SAmple Consensus (SAC) methods like RANSAC and models like planes and cylinders. These can combined freely in order to detect specific models and their parameters in point clouds.
+ The *sample_consensus* library holds SAmple Consensus (SAC) methods like RANSAC and models like planes and cylinders. These can be combined freely in order to detect specific models and their parameters in point clouds.
A theoretical primer explaining how sample consensus algorithms work can be found in the `Random Sample Consensus tutorial <http://pointclouds.org/documentation/tutorials/random_sample_consensus.php#random-sample-consensus>`_
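For instance, a minimal sketch of pairing a plane model with the RANSAC estimator could look like this (``cloud`` is assumed to be an already populated ``pcl::PointCloud<pcl::PointXYZ>::Ptr``; the threshold value is arbitrary):

.. code-block:: cpp

   #include <pcl/point_types.h>
   #include <pcl/sample_consensus/ransac.h>
   #include <pcl/sample_consensus/sac_model_plane.h>

   // Pair a plane model with the RANSAC estimator and collect the inliers
   pcl::SampleConsensusModelPlane<pcl::PointXYZ>::Ptr
     model (new pcl::SampleConsensusModelPlane<pcl::PointXYZ> (cloud));
   pcl::RandomSampleConsensus<pcl::PointXYZ> ransac (model);
   ransac.setDistanceThreshold (0.01);  // inlier distance threshold in metres
   ransac.computeModel ();

   pcl::Indices inliers;
   ransac.getInliers (inliers);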
|
- * ``pcd_convert_NaN_nan``: converts "NaN" values to "nan" values. *(Note: Starting with PCL version 1.0.1 the string representation for NaN is “nan”.)*
+ * ``pcl_pcd_convert_NaN_nan``: converts "NaN" values to "nan" values. *(Note: Starting with PCL version 1.0.1 the string representation for NaN is “nan”.)*
**Usage example:**
- ``pcd_convert_NaN_nan input.pcd output.pcd``
+ ``pcl_pcd_convert_NaN_nan input.pcd output.pcd``
- * ``convert_pcd_ascii_binary``: converts PCD (Point Cloud Data) files from ASCII to binary and viceversa.
+ * ``pcl_convert_pcd_ascii_binary``: converts PCD (Point Cloud Data) files from ASCII to binary and vice versa.
**Usage example:**
- ``convert_pcd_ascii_binary <file_in.pcd> <file_out.pcd> 0/1/2 (ascii/binary/binary_compressed) [precision (ASCII)]``
+ ``pcl_convert_pcd_ascii_binary <file_in.pcd> <file_out.pcd> 0/1/2 (ascii/binary/binary_compressed) [precision (ASCII)]``
- * ``concatenate_points_pcd``: concatenates the points of two or more PCD (Point Cloud Data) files into a single PCD file.
+ * ``pcl_concatenate_points_pcd``: concatenates the points of two or more PCD (Point Cloud Data) files into a single PCD file.
**Usage example:**
- ``concatenate_points_pcd <filename 1..N.pcd>``
+ ``pcl_concatenate_points_pcd <filename 1..N.pcd>``
*(Note: the resulting PCD file will be ``output.pcd``)*
- * ``pcd2vtk``: converts PCD (Point Cloud Data) files to the `VTK format <http://www.vtk.org/VTK/img/file-formats.pdf>`_.
+ * ``pcl_pcd2vtk``: converts PCD (Point Cloud Data) files to the `VTK format <http://www.vtk.org/VTK/img/file-formats.pdf>`_.
**Usage example:**
- ``pcd2vtk input.pcd output.vtk``
+ ``pcl_pcd2vtk input.pcd output.vtk``
- * ``pcd2ply``: converts PCD (Point Cloud Data) files to the `PLY format <http://en.wikipedia.org/wiki/PLY_%28file_format%29>`_.
+ * ``pcl_pcd2ply``: converts PCD (Point Cloud Data) files to the `PLY format <http://en.wikipedia.org/wiki/PLY_%28file_format%29>`_.
**Usage example:**
- ``pcd2ply input.pcd output.ply``
+ ``pcl_pcd2ply input.pcd output.ply``
- * ``mesh2pcd``: convert a CAD model to a PCD (Point Cloud Data) file, using ray tracing operations.
+ * ``pcl_mesh2pcd``: convert a CAD model to a PCD (Point Cloud Data) file, using ray tracing operations.
- **Syntax is: mesh2pcd input.{ply,obj} output.pcd <options>**, where options are:
+ **Syntax is: pcl_mesh2pcd input.{ply,obj} output.pcd <options>**, where options are:
-level X = tessellated sphere level (default: 2)
.. _`octree_viewer`:
- * ``octree_viewer``: allows the visualization of `octrees`__
+ * ``pcl_octree_viewer``: allows the visualization of `octrees`__
**Syntax is: octree_viewer <file_name.pcd> <octree resolution>**
**Usage example:**
- ``Example: ./octree_viewer ../../test/bunny.pcd 0.02``
+ ``Example: ./pcl_octree_viewer ../../test/bunny.pcd 0.02``
.. image:: images/octree_bunny2.png
If you're not familiar with the PCL file structure already, please go ahead
and read the `PCL C++ Programming Style Guide
- <http://www.pointclouds.org/documentation/advanced/pcl_style_guide.php>`_ to
+ <https://pcl.readthedocs.io/projects/advanced/en/latest/pcl_style_guide.html>`_ to
familiarize yourself with the concepts.
There are two different ways we could set up the structure: i) set up the code
.. code-block:: cpp
:linenos:
- #ifndef PCL_FILTERS_BILATERAL_H_
- #define PCL_FILTERS_BILATERAL_H_
+ #pragma once
#include <pcl/filters/filter.h>
};
}
- #endif // PCL_FILTERS_BILATERAL_H_
-
bilateral.hpp
=============
.. code-block:: cpp
:linenos:
- #ifndef PCL_FILTERS_BILATERAL_IMPL_H_
- #define PCL_FILTERS_BILATERAL_IMPL_H_
+ #pragma once
#include <pcl/filters/bilateral.h>
-
- #endif // PCL_FILTERS_BILATERAL_IMPL_H_
This should be straightforward. We haven't declared any methods for
`BilateralFilter` yet, therefore there is no implementation.
.. code-block:: cpp
:linenos:
- #ifndef PCL_FILTERS_BILATERAL_H_
- #define PCL_FILTERS_BILATERAL_H_
+ #pragma once
#include <pcl/filters/filter.h>
#include <pcl/kdtree/kdtree.h>
};
}
- #endif // PCL_FILTERS_BILATERAL_H_
-
bilateral.hpp
=============
.. code-block:: cpp
:linenos:
- #ifndef PCL_FILTERS_BILATERAL_IMPL_H_
- #define PCL_FILTERS_BILATERAL_IMPL_H_
+ #pragma once
#include <pcl/filters/bilateral.h>
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
- #endif // PCL_FILTERS_BILATERAL_IMPL_H_
-
One additional thing that we can do is error checking on:
* whether the two `sigma_s_` and `sigma_r_` parameters have been given;
.. code-block:: cpp
:linenos:
- #ifndef PCL_FILTERS_BILATERAL_IMPL_H_
- #define PCL_FILTERS_BILATERAL_IMPL_H_
+ #pragma once
#include <pcl/filters/bilateral.h>
#include <pcl/kdtree/kdtree_flann.h>
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
- #endif // PCL_FILTERS_BILATERAL_IMPL_H_
-
Taking advantage of other PCL concepts
--------------------------------------
.. code-block:: cpp
:linenos:
- #ifndef PCL_FILTERS_BILATERAL_IMPL_H_
- #define PCL_FILTERS_BILATERAL_IMPL_H_
+ #pragma once
#include <pcl/filters/bilateral.h>
#include <pcl/kdtree/kdtree_flann.h>
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
- #endif // PCL_FILTERS_BILATERAL_IMPL_H_
-
To make :pcl:`indices_<pcl::PCLBase::indices_>` work without typing the full
construct, we need to add a new line to *bilateral.h* that specifies the class
where `indices_` is declared:
.. code-block:: cpp
:linenos:
- /*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
+ /*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception Inc.
+ *
+ * All rights reserved
+ */
An additional line can be inserted if additional copyright is needed (or the
original copyright can be changed):
.. code-block:: cpp
:linenos:
- /*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
- #ifndef PCL_FILTERS_BILATERAL_H_
- #define PCL_FILTERS_BILATERAL_H_
+ /*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception Inc.
+ *
+ * All rights reserved
+ */
+
+ #pragma once
#include <pcl/filters/filter.h>
#include <pcl/kdtree/kdtree.h>
};
}
- #endif // PCL_FILTERS_BILATERAL_H_
-
And the *bilateral.hpp* looks like:
.. code-block:: cpp
:linenos:
- /*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
- #ifndef PCL_FILTERS_BILATERAL_IMPL_H_
- #define PCL_FILTERS_BILATERAL_IMPL_H_
+ /*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception Inc.
+ *
+ * All rights reserved
+ */
+
+ #pragma once
#include <pcl/filters/bilateral.h>
#include <pcl/kdtree/kdtree_flann.h>
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
- #endif // PCL_FILTERS_BILATERAL_IMPL_H_
-
Testing the new class
---------------------
for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it, j++)
{
pcl::PointCloud<PointOutT>::Ptr cloud_cluster_don (new pcl::PointCloud<PointOutT>);
- for (const int &index : it->indices){
+ for (const auto &index : it->indices){
cloud_cluster_don->points.push_back ((*doncloud)[index]);
}
if (pcl::io::loadPCDFile<pcl::PointXYZ> (fileName, *cloud) == -1) // load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR ("Couldn't read file\n");
return (-1);
}
if (pcl::io::loadPCDFile<pcl::PointXYZ> (filename, *cloud) == -1) // load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR ("Couldn't read file\n");
return -1;
}
if (pcl::io::loadPCDFile<pcl::PointXYZ> (filename, *cloud) == -1) //* load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR ("Couldn't read file\n");
return (-1);
}
if (pcl::io::loadPCDFile<pcl::PointXYZ> (filename, *cloud) == -1) //* load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR ("Couldn't read file\n");
return (-1);
}
if (pcl::io::loadPCDFile<pcl::PointXYZI> (filename, *cloud) == -1) // load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR ("Couldn't read file\n");
return -1;
}
if (pcl::io::loadPCDFile <pcl::PointXYZ> (filename, *cloud) == -1)
// load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR ("Couldn't read file\n");
return (-1);
}
std::cout << "Loaded " << cloud->size () << " points." << std::endl;
if (pcl::io::loadPCDFile <pcl::PointXYZ> (filename, *cloud) == -1)
// load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR("Couldn't read file\n");
return (-1);
}
std::cout << "Loaded " << cloud->size () << " points." << std::endl;
std::cout << "size: " << cloud->size () << std::endl;
- std::vector<int> indices;
+ pcl::Indices indices;
pcl::removeNaNFromPointCloud(*cloud, *output_cloud, indices);
std::cout << "size: " << output_cloud->size () << std::endl;
## Find VTK
if(NOT VTK_FOUND)
- set(DEFAULT FALSE)
- set(REASON "VTK was not found.")
+ set(DEFAULT FALSE)
+ set(REASON "VTK was not found.")
else()
- set(DEFAULT TRUE)
- set(REASON)
- include(${VTK_USE_FILE})
+ set(DEFAULT TRUE)
+ set(REASON)
endif()
PCL_ADD_EXAMPLE(pcl_example_sift_keypoint_estimation FILES example_sift_keypoint_estimation.cpp
*
*/
+#include <pcl/common/time.h> // for StopWatch
#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/harris_3d.h>
pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGB>);
if(pcl::io::loadPCDFile<pcl::PointXYZRGB> (filename, *cloud) == -1) // load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR ("Couldn't read file\n");
return -1;
}
std::cout << "points: " << cloud->size () <<std::endl;
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_xyz (new pcl::PointCloud<pcl::PointXYZ>);
if(pcl::io::loadPCDFile<pcl::PointXYZ> (filename, *cloud_xyz) == -1) // load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR("Couldn't read file\n");
return -1;
}
std::cout << "points: " << cloud_xyz->size () <<std::endl;
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_xyz (new pcl::PointCloud<pcl::PointXYZ>);
if(pcl::io::loadPCDFile<pcl::PointXYZ> (filename, *cloud_xyz) == -1) // load the file
{
- PCL_ERROR ("Couldn't read file");
+ PCL_ERROR("Couldn't read file\n");
return -1;
}
std::cout << "points: " << cloud_xyz->size () <<std::endl;
else()
set(DEFAULT TRUE)
set(REASON)
- include(${VTK_USE_FILE})
endif()
PCL_SUBSYS_DEPEND (build ${SUBSYS_NAME} DEPS outofcore io common octree filters visualization EXT_DEPS vtk)
## Find VTK
if(VTK_FOUND)
- include(${VTK_USE_FILE})
PCL_ADD_EXAMPLE(pcl_example_supervoxels FILES example_supervoxels.cpp
LINK_WITH pcl_common pcl_features pcl_segmentation pcl_octree pcl_kdtree pcl_visualization)
PCL_ADD_EXAMPLE(pcl_example_lccp_segmentation FILES example_lccp_segmentation.cpp
for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it)
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
- for (const int &index : it->indices)
+ for (const auto &index : it->indices)
cloud_cluster->push_back ((*cloud_ptr)[index]);
cloud_cluster->width = cloud_cluster->size ();
cloud_cluster->height = 1;
// Remove the nans
cloud_ptr->is_dense = false;
cloud_no_nans->is_dense = false;
- std::vector<int> indices;
+ pcl::Indices indices;
pcl::removeNaNFromPointCloud (*cloud_ptr, *cloud_no_nans, indices);
pcl::console::print_highlight("Removed nans from %zu to %zu\n",
static_cast<std::size_t>(cloud_ptr->size()),
clusters_file.open ("clusters.dat");
for (std::size_t i = 0; i < clusters.size (); ++i)
{
- clusters_file << i << "#" << clusters[i].indices.size () << ": ";
- std::vector<int>::const_iterator pit = clusters[i].indices.begin ();
- clusters_file << *pit;
- for (; pit != clusters[i].indices.end (); ++pit)
- clusters_file << " " << *pit;
+ clusters_file << i << "#" << clusters[i].indices.size () << ":";
+ for (const auto& cluster_idx : clusters[i].indices)
+ clusters_file << " " << cluster_idx;
clusters_file << std::endl;
}
clusters_file.close ();
else()
set(DEFAULT TRUE)
set(REASON)
- include(${VTK_USE_FILE})
endif()
PCL_ADD_EXAMPLE(pcl_example_stereo_baseline FILES example_stereo_baseline.cpp
else()
set(DEFAULT TRUE)
set(REASON)
- include(${VTK_USE_FILE})
endif()
PCL_ADD_EXAMPLE(pcl_test_nurbs_fitting_surface
-#include <pcl/point_cloud.h>
-
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/surface/on_nurbs/triangulation.h>
#include <random>
#include <pcl/point_types.h>
-#include <pcl/features/boost.h>
#include <pcl/features/feature.h>
namespace pcl
* \param[in,out] normal the normal to disambiguate, the calculation is performed in place
*/
void
- normalDisambiguation (pcl::PointCloud<PointNT> const &normals_cloud, std::vector<int> const &normal_indices,
+ normalDisambiguation (pcl::PointCloud<PointNT> const &normals_cloud, pcl::Indices const &normal_indices,
Eigen::Vector3f &normal);
/** \brief Compute Least Square Plane Fitting in a set of 3D points
#if defined __GNUC__
# pragma GCC system_header
#endif
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#include <boost/property_map/property_map.hpp>
#pragma once
-#include <pcl/features/eigen.h>
#include <pcl/features/feature.h>
namespace pcl
*/
bool
isBoundaryPoint (const pcl::PointCloud<PointInT> &cloud,
- int q_idx, const std::vector<int> &indices,
+ int q_idx, const pcl::Indices &indices,
const Eigen::Vector4f &u, const Eigen::Vector4f &v, const float angle_threshold);
/** \brief Check whether a point is a boundary point in a planar patch of projected points given by indices.
bool
isBoundaryPoint (const pcl::PointCloud<PointInT> &cloud,
const PointInT &q_point,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const Eigen::Vector4f &u, const Eigen::Vector4f &v, const float angle_threshold);
/** \brief Set the decision boundary (angle threshold) that marks points as boundary or regular.
// PCL includes
#include <pcl/features/feature.h>
-#include <pcl/common/eigen.h>
#include <pcl/common/centroid.h>
#include <pcl/common/intensity.h>
#pragma once
#include <pcl/features/feature.h>
-#include <pcl/features/boost.h>
namespace pcl
{
#include <pcl/features/feature.h>
#include <pcl/features/vfh.h>
-#include <pcl/search/pcl_search.h>
-#include <pcl/common/common.h>
+#include <pcl/search/search.h> // for Search
namespace pcl
{
* \param[in] threshold threshold value for curvature
*/
void
- filterNormalsWithHighCurvature (const pcl::PointCloud<PointNT> & cloud, std::vector<int> & indices_to_use, std::vector<int> &indices_out,
- std::vector<int> &indices_in, float threshold);
+ filterNormalsWithHighCurvature (const pcl::PointCloud<PointNT> & cloud, pcl::Indices & indices_to_use, pcl::Indices &indices_out,
+ pcl::Indices &indices_in, float threshold);
/** \brief Set the viewpoint.
* \param[in] vpx the X coordinate of the viewpoint
#if defined __GNUC__
# pragma GCC system_header
#endif
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
#include <Eigen/StdVector>
#include <Eigen/Geometry>
using PointCloudOut = pcl::PointCloud<PointOutT>;
- using SearchMethod = std::function<int (std::size_t, double, std::vector<int> &, std::vector<float> &)>;
- using SearchMethodSurface = std::function<int (const PointCloudIn &cloud, std::size_t index, double, std::vector<int> &, std::vector<float> &)>;
+ using SearchMethod = std::function<int (std::size_t, double, pcl::Indices &, std::vector<float> &)>;
+ using SearchMethodSurface = std::function<int (const PointCloudIn &cloud, std::size_t index, double, pcl::Indices &, std::vector<float> &)>;
public:
/** \brief Empty constructor. */
*/
inline int
searchForNeighbors (std::size_t index, double parameter,
- std::vector<int> &indices, std::vector<float> &distances) const
+ pcl::Indices &indices, std::vector<float> &distances) const
{
return (search_method_surface_ (*input_, index, parameter, indices, distances));
}
*/
inline int
searchForNeighbors (const PointCloudIn &cloud, std::size_t index, double parameter,
- std::vector<int> &indices, std::vector<float> &distances) const
+ pcl::Indices &indices, std::vector<float> &distances) const
{
return (search_method_surface_ (cloud, index, parameter, indices, distances));
}
#pragma once
#include <pcl/features/feature.h>
-#include <set>
namespace pcl
{
*/
void
computePointSPFHSignature (const pcl::PointCloud<PointInT> &cloud,
- const pcl::PointCloud<PointNT> &normals, int p_idx, int row,
- const std::vector<int> &indices,
+ const pcl::PointCloud<PointNT> &normals, pcl::index_t p_idx, int row,
+ const pcl::Indices &indices,
Eigen::MatrixXf &hist_f1, Eigen::MatrixXf &hist_f2, Eigen::MatrixXf &hist_f3);
/** \brief Weight the SPFH (Simple Point Feature Histograms) individual histograms to create the final FPFH
weightPointSPFHSignature (const Eigen::MatrixXf &hist_f1,
const Eigen::MatrixXf &hist_f2,
const Eigen::MatrixXf &hist_f3,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const std::vector<float> &dists,
Eigen::VectorXf &fpfh_histogram);
normals.header = cloud.header;
normals.width = cloud.width;
normals.height = cloud.height;
- normals.points.resize(nr_points);
+ normals.resize(nr_points);
for (auto& point: normals.points)
point.getNormalVector3fMap() = Eigen::Vector3f::Zero();
#pragma once
#include <pcl/features/feature.h>
-#include <pcl/common/common.h>
-#include <pcl/point_cloud.h>
namespace pcl
{
/** \brief Return the dominant label of a set of points. */
std::uint32_t
- getDominantLabel (const std::vector<int>& indices);
+ getDominantLabel (const pcl::Indices& indices);
/** \brief Compute the fixed-length histograms of transitions. */
void computeTransitionHistograms (const std::vector< std::vector<int> >& label_histograms,
#pragma once
#include <pcl/features/feature.h>
-#include <pcl/features/rsd.h>
#include <pcl/filters/voxel_grid.h>
namespace pcl
#include <pcl/common/utils.h>
#include <cmath>
-
+#include <numeric> // for partial_sum
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT> bool
Eigen::Map<Eigen::Vector3f> normal (rf + 6);
// Find every point within specified search_radius_
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
const std::size_t neighb_cnt = searchForNeighbors ((*indices_)[index], search_radius_, nn_indices, nn_dists);
if (neighb_cnt == 0)
const auto l = std::distance(phi_divisions_.cbegin (), std::prev(phi_min));
// Local point density = number of points in a sphere of radius "point_density_radius_" around the current neighbour
- std::vector<int> neighbour_indices;
+ pcl::Indices neighbour_indices;
std::vector<float> neighbour_distances;
int point_density = searchForNeighbors (*surface_, nn_indices[ne], point_density_radius_, neighbour_indices, neighbour_distances);
// point_density is NOT always bigger than 0 (on error, searchForNeighbors returns 0), so we must check for that
template<typename PointInT, typename PointNT, typename PointOutT> void
pcl::BOARDLocalReferenceFrameEstimation<PointInT, PointNT, PointOutT>::normalDisambiguation (
pcl::PointCloud<PointNT> const &normal_cloud,
- std::vector<int> const &normal_indices,
+ pcl::Indices const &normal_indices,
Eigen::Vector3f &normal)
{
Eigen::Vector3f normal_mean;
normal_mean.setZero ();
- for (const int &normal_index : normal_indices)
+ for (const auto &normal_index : normal_indices)
{
const PointNT& curPt = normal_cloud[normal_index];
//find Z axis
//extract support points for Rz radius
- std::vector<int> neighbours_indices;
+ pcl::Indices neighbours_indices;
std::vector<float> neighbours_distances;
std::size_t n_neighbours = this->searchForNeighbors (index, search_parameter_, neighbours_indices, neighbours_distances);
template <typename PointInT, typename PointNT, typename PointOutT> bool
pcl::BoundaryEstimation<PointInT, PointNT, PointOutT>::isBoundaryPoint (
const pcl::PointCloud<PointInT> &cloud, int q_idx,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const Eigen::Vector4f &u, const Eigen::Vector4f &v,
const float angle_threshold)
{
template <typename PointInT, typename PointNT, typename PointOutT> bool
pcl::BoundaryEstimation<PointInT, PointNT, PointOutT>::isBoundaryPoint (
const pcl::PointCloud<PointInT> &cloud, const PointInT &q_point,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const Eigen::Vector4f &u, const Eigen::Vector4f &v,
const float angle_threshold)
{
float max_dif = FLT_MIN, dif;
int cp = 0;
- for (const int &index : indices)
+ for (const auto &index : indices)
{
if (!std::isfinite (cloud[index].x) ||
!std::isfinite (cloud[index].y) ||
{
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
Eigen::Vector4f u = Eigen::Vector4f::Zero (), v = Eigen::Vector4f::Zero ();
#define PCL_FEATURES_IMPL_CPPF_H_
#include <pcl/features/cppf.h>
-#include <pcl/features/pfh.h>
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT>
output.is_dense = false;
}
- output.points.push_back (p);
+ output.push_back (p);
}
}
// overwrite the sizes done by Feature::initCompute ()
{
PCL_ERROR ("[pcl::%s::computeFeature] No input dataset containing normals was given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
{
PCL_ERROR ("[pcl::%s::computeFeature] The number of points in the input dataset differs from the number of points in the dataset containing the normals!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
Eigen::Affine3f transformPC (Eigen::AngleAxisf (static_cast<float> (rotation), axis));
pcl::PointCloud<pcl::PointNormal> grid;
- grid.points.resize (indices_->size ());
+ grid.resize (indices_->size ());
for (std::size_t i = 0; i < indices_->size (); i++)
{
data.i /= freq_data[0].r;
}
- output.points.resize (1);
+ output.resize (1);
output.width = output.height = 1;
output[0].histogram[0] = freq_data[0].r; //dc
if (!Feature<PointInT, PointOutT>::initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
// Resize the output dataset
// we risk at pre-allocating too much memory which could lead to bad_alloc
// (see http://dev.pointclouds.org/issues/657)
output.width = output.height = 1;
- output.points.resize (1);
+ output.resize (1);
// Perform the actual feature computation
computeFeature (output);
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
for (std::size_t i = 0; i < cloud.size (); ++i)
template<typename PointInT, typename PointNT, typename PointOutT> void
pcl::CVFHEstimation<PointInT, PointNT, PointOutT>::filterNormalsWithHighCurvature (
const pcl::PointCloud<PointNT> & cloud,
- std::vector<int> &indices_to_use,
- std::vector<int> &indices_out,
- std::vector<int> &indices_in,
+ pcl::Indices &indices_to_use,
+ pcl::Indices &indices_out,
+ pcl::Indices &indices_in,
float threshold)
{
indices_out.resize (cloud.size ());
std::size_t in, out;
in = out = 0;
- for (const int &index : indices_to_use)
+ for (const auto &index : indices_to_use)
{
if (cloud[index].curvature > threshold)
{
{
PCL_ERROR ("[pcl::%s::computeFeature] No input dataset containing normals was given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (normals_->size () != surface_->size ())
{
PCL_ERROR ("[pcl::%s::computeFeature] The number of points in the input dataset differs from the number of points in the dataset containing the normals!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
centroids_dominant_orientations_.clear ();
// ---[ Step 0: remove normals with high curvature
- std::vector<int> indices_out;
- std::vector<int> indices_in;
+ pcl::Indices indices_out;
+ pcl::Indices indices_in;
filterNormalsWithHighCurvature (*normals_, *indices_, indices_out, indices_in, curv_threshold_);
pcl::PointCloud<pcl::PointNormal>::Ptr normals_filtered_cloud (new pcl::PointCloud<pcl::PointNormal> ());
}
//compute modified VFH for all dominant clusters and add them to the list!
- output.points.resize (dominant_normals_.size ());
+ output.resize (dominant_normals_.size ());
output.width = dominant_normals_.size ();
for (std::size_t i = 0; i < dominant_normals_.size (); ++i)
pcl::PointCloud<pcl::VFHSignature308> vfh_signature;
vfh.compute (vfh_signature);
- output.points.resize (1);
+ output.resize (1);
output.width = 1;
output[0] = vfh_signature[0];
#include <pcl/common/distances.h>
#include <pcl/common/transforms.h>
#include <vector>
+#include <ctime> // for time
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
if (!Feature<PointInT, PointOutT>::initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
// Copy the header
// (see http://dev.pointclouds.org/issues/657)
output.width = output.height = 1;
output.is_dense = input_->is_dense;
- output.points.resize (1);
+ output.resize (1);
// Perform the actual feature computation
computeFeature (output);
this->cleanup9 (local_cloud_);
// We only output _1_ signature
- output.points.resize (1);
+ output.resize (1);
output.width = 1;
output.height = 1;
#ifndef PCL_FEATURES_IMPL_FEATURE_H_
#define PCL_FEATURES_IMPL_FEATURE_H_
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/kdtree.h> // for KdTree
+#include <pcl/search/organized.h> // for OrganizedNeighbor
namespace pcl
// for (int i = 0; i < 9; ++i)
// if (!std::isfinite (covariance_matrix.coeff (i)))
// {
-// //PCL_WARN ("[pcl::solvePlaneParameteres] Covariance matrix has NaN/Inf values!\n");
+// //PCL_WARN ("[pcl::solvePlaneParameters] Covariance matrix has NaN/Inf values!\n");
// nx = ny = nz = curvature = std::numeric_limits<float>::quiet_NaN ();
// return;
// }
search_parameter_ = search_radius_;
// Declare the search locator definition
search_method_surface_ = [this] (const PointCloudIn &cloud, int index, double radius,
- std::vector<int> &k_indices, std::vector<float> &k_distances)
+ pcl::Indices &k_indices, std::vector<float> &k_distances)
{
return tree_->radiusSearch (cloud, index, radius, k_indices, k_distances, 0);
};
{
search_parameter_ = k_;
// Declare the search locator definition
- search_method_surface_ = [this] (const PointCloudIn &cloud, int index, int k, std::vector<int> &k_indices,
+ search_method_surface_ = [this] (const PointCloudIn &cloud, int index, int k, pcl::Indices &k_indices,
std::vector<float> &k_distances)
{
return tree_->nearestKSearch (cloud, index, k, k_indices, k_distances);
if (!initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
output.header = input_->header;
// Resize the output dataset
- if (output.points.size () != indices_->size ())
- output.points.resize (indices_->size ());
+ if (output.size () != indices_->size ())
+ output.resize (indices_->size ());
// Check if the output will be computed for all points or only a subset
// If the input width or height are not set, set output width as size
if (normals_->points.size () != surface_->points.size ())
{
PCL_ERROR ("[pcl::%s::initCompute] ", getClassName ().c_str ());
- PCL_ERROR("The number of points in the input dataset (%zu) differs from ",
+ PCL_ERROR("The number of points in the surface dataset (%zu) differs from ",
static_cast<std::size_t>(surface_->points.size()));
PCL_ERROR("the number of points in the dataset containing the normals (%zu)!\n",
static_cast<std::size_t>(normals_->points.size()));
//find Z axis
//extract support points for the computation of Z axis
- std::vector<int> neighbours_indices;
+ pcl::Indices neighbours_indices;
std::vector<float> neighbours_distances;
const std::size_t n_normal_neighbours =
#include <pcl/common/point_tests.h> // for pcl::isFinite
#include <pcl/features/pfh_tools.h>
+#include <set> // for std::set
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT> bool
template <typename PointInT, typename PointNT, typename PointOutT> void
pcl::FPFHEstimation<PointInT, PointNT, PointOutT>::computePointSPFHSignature (
const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals,
- int p_idx, int row, const std::vector<int> &indices,
+ pcl::index_t p_idx, int row, const pcl::Indices &indices,
Eigen::MatrixXf &hist_f1, Eigen::MatrixXf &hist_f2, Eigen::MatrixXf &hist_f3)
{
Eigen::Vector4f pfh_tuple;
template <typename PointInT, typename PointNT, typename PointOutT> void
pcl::FPFHEstimation<PointInT, PointNT, PointOutT>::weightPointSPFHSignature (
const Eigen::MatrixXf &hist_f1, const Eigen::MatrixXf &hist_f2, const Eigen::MatrixXf &hist_f3,
- const std::vector<int> &indices, const std::vector<float> &dists, Eigen::VectorXf &fpfh_histogram)
+ const pcl::Indices &indices, const std::vector<float> &dists, Eigen::VectorXf &fpfh_histogram)
{
assert (indices.size () == dists.size ());
// @TODO: use arrays
{
// Allocate enough space to hold the NN search results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
std::set<int> spfh_indices;
{
// Allocate enough space to hold the NN search results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
std::vector<int> spfh_hist_lookup;
if (surface_ != input_ ||
indices_->size () != surface_->size ())
{
- std::vector<int> nn_indices (k_); // \note These resizes are irrelevant for a radiusSearch ().
+ pcl::Indices nn_indices (k_); // \note These resizes are irrelevant for a radiusSearch ().
std::vector<float> nn_dists (k_);
std::set<int> spfh_indices_set;
hist_f2_.setZero (data_size, nr_bins_f2_);
hist_f3_.setZero (data_size, nr_bins_f3_);
- std::vector<int> nn_indices (k_); // \note These resizes are irrelevant for a radiusSearch ().
+ pcl::Indices nn_indices (k_); // \note These resizes are irrelevant for a radiusSearch ().
std::vector<float> nn_dists (k_);
// Compute SPFH signatures for every point that needs them
int p_idx = spfh_indices_vec[i];
// Find the neighborhood around p_idx
- if (!isFinite ((*input_)[p_idx]) ||
+ if (!isFinite ((*surface_)[p_idx]) ||
this->searchForNeighbors (*surface_, p_idx, search_parameter_, nn_indices, nn_dists) == 0)
continue;
// ... and remap the nn_indices values so that they represent row indices in the spfh_hist_* matrices
// instead of indices into surface_->points
- for (int &nn_index : nn_indices)
+ for (auto &nn_index : nn_indices)
nn_index = spfh_hist_lookup[nn_index];
// Compute the FPFH signature (i.e. compute a weighted combination of local SPFH signatures) ...
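For the std::vector&lt;int&gt; → pcl::Indices change used throughout, a small self-contained sketch, assuming pcl::Indices (std::vector&lt;pcl::index_t&gt;) is the container the search API expects; the helper name is illustrative only:

  #include <vector>
  #include <pcl/point_cloud.h>
  #include <pcl/point_types.h>
  #include <pcl/search/kdtree.h>

  // Gather the neighbours of a query point into pcl::Indices via radiusSearch.
  int
  neighboursWithinRadius (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
                          const pcl::PointXYZ &query, double radius,
                          pcl::Indices &k_indices, std::vector<float> &k_sqr_distances)
  {
    pcl::search::KdTree<pcl::PointXYZ> tree;
    tree.setInputCloud (cloud);
    return tree.radiusSearch (query, radius, k_indices, k_sqr_distances, 0); // 0 = no limit on neighbours
  }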
#define PCL_FEATURES_IMPL_GASD_H_
#include <pcl/features/gasd.h>
+#include <pcl/common/common.h> // for getMinMax3D
#include <pcl/common/transforms.h>
#include <vector>
if (!Feature<PointInT, PointOutT>::initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (!Feature<PointInT, PointOutT>::initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
// Copy the header
// (see http://dev.pointclouds.org/issues/657)
output.width = output.height = 1;
output.is_dense = input_->is_dense;
- output.points.resize (1);
+ output.resize (1);
// Perform the actual feature computation
computeFeature (output);
std::vector<int> histogram;
for (std::size_t k = 0; k < intersected_cells.size (); ++k)
{
- std::vector<int> indices;
+ pcl::Indices indices;
octree.voxelSearch (intersected_cells[k], indices);
int label = emptyLabel ();
if (!indices.empty ())
output.clear ();
output.width = 1;
output.height = 1;
- output.points.resize (1);
+ output.resize (1);
std::copy (gfpfh_histogram.cbegin (), gfpfh_histogram.cend (), output[0].histogram);
}
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT> std::uint32_t
-pcl::GFPFHEstimation<PointInT, PointNT, PointOutT>::getDominantLabel (const std::vector<int>& indices)
+pcl::GFPFHEstimation<PointInT, PointNT, PointOutT>::getDominantLabel (const pcl::Indices& indices)
{
std::vector<std::uint32_t> counts (getNumberOfClasses () + 1, 0);
- for (const int &nn_index : indices)
+ for (const auto &nn_index : indices)
{
std::uint32_t label = (*labels_)[nn_index].label;
counts[label] += 1;
#define PCL_FEATURES_IMPL_GRSD_H_
#include <pcl/features/grsd.h>
+#include <pcl/features/rsd.h> // for RSDEstimation
///////// STATIC /////////
template <typename PointInT, typename PointNT, typename PointOutT> int
pcl::GRSDEstimation<PointInT, PointNT, PointOutT>::getSimpleType (float min_radius, float max_radius,
{
PCL_ERROR ("[pcl::%s::computeFeature] A voxel cell width needs to be set!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
}
// Save feature values
- output.points.resize (1);
+ output.resize (1);
output.height = output.width = 1;
int nrf = 0;
for (int i = 0; i < NR_CLASS + 1; i++)
#ifndef PCL_INTEGRAL_IMAGE2D_IMPL_H_
#define PCL_INTEGRAL_IMAGE2D_IMPL_H_
-#include <cstddef>
-
+#include <algorithm> // for std::fill_n
namespace pcl
{
{
ElementType* previous_row = &first_order_integral_image_[0];
ElementType* current_row = previous_row + (width_ + 1);
- memset (previous_row, 0, sizeof (ElementType) * (width_ + 1));
+ std::fill_n (previous_row, width_ + 1, ElementType::Zero ());
unsigned* count_previous_row = &finite_values_integral_image_[0];
unsigned* count_current_row = count_previous_row + (width_ + 1);
{
SecondOrderType* so_previous_row = &second_order_integral_image_[0];
SecondOrderType* so_current_row = so_previous_row + (width_ + 1);
- memset (so_previous_row, 0, sizeof (SecondOrderType) * (width_ + 1));
+ std::fill_n (so_previous_row, width_ + 1, SecondOrderType::Zero ());
SecondOrderType so_element;
for (unsigned rowIdx = 0; rowIdx < height_; ++rowIdx, data += row_stride,
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT, typename IntensitySelectorT> void
pcl::IntensityGradientEstimation <PointInT, PointNT, PointOutT, IntensitySelectorT>::computePointIntensityGradient (
- const pcl::PointCloud <PointInT> &cloud, const std::vector <int> &indices,
+ const pcl::PointCloud <PointInT> &cloud, const pcl::Indices &indices,
const Eigen::Vector3f &point, float mean_intensity, const Eigen::Vector3f &normal, Eigen::Vector3f &gradient)
{
if (indices.size () < 3)
Eigen::Matrix3f A = Eigen::Matrix3f::Zero ();
Eigen::Vector3f b = Eigen::Vector3f::Zero ();
- for (const int &nn_index : indices)
+ for (const auto &nn_index : indices)
{
PointInT p = cloud[nn_index];
if (!std::isfinite (p.x) ||
{
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
output.is_dense = true;
float mean_intensity = 0;
// Initialize to 0
centroid.setZero ();
- for (const int &nn_index : nn_indices)
+ for (const auto &nn_index : nn_indices)
{
centroid += (*surface_)[nn_index].getVector3fMap ();
mean_intensity += intensity_ ((*surface_)[nn_index]);
// Initialize to 0
centroid.setZero ();
unsigned cp = 0;
- for (const int &nn_index : nn_indices)
+ for (const auto &nn_index : nn_indices)
{
// Check if the point is invalid
if (!isFinite ((*surface_) [nn_index]))
pcl::IntensitySpinEstimation<PointInT, PointOutT>::computeIntensitySpinImage (
const PointCloudIn &cloud, float radius, float sigma,
int k,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const std::vector<float> &squared_distances,
Eigen::MatrixXf &intensity_spin_image)
{
PCL_ERROR ("[pcl::%s::computeFeature] The search radius must be set before computing the feature!\n",
getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
PCL_ERROR ("[pcl::%s::computeFeature] The number of intensity bins must be greater than zero!\n",
getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (nr_distance_bins_ <= 0)
PCL_ERROR ("[pcl::%s::computeFeature] The number of distance bins must be greater than zero!\n",
getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
Eigen::MatrixXf intensity_spin_image (nr_intensity_bins_, nr_distance_bins_);
// Allocate enough space to hold the radiusSearch results
- std::vector<int> nn_indices (surface_->size ());
+ pcl::Indices nn_indices (surface_->size ());
std::vector<float> nn_dist_sqr (surface_->size ());
output.is_dense = true;
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
pcl::MomentInvariantsEstimation<PointInT, PointOutT>::computePointMomentInvariants (
- const pcl::PointCloud<PointInT> &cloud, const std::vector<int> &indices,
+ const pcl::PointCloud<PointInT> &cloud, const pcl::Indices &indices,
float &j1, float &j2, float &j3)
{
// Estimate the XYZ centroid
float mu200 = 0, mu020 = 0, mu002 = 0, mu110 = 0, mu101 = 0, mu011 = 0;
// Iterate over the nearest neighbors set
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// Demean the points
temp_pt_[0] = cloud[index].x - xyz_centroid_[0];
{
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
output.is_dense = true;
#ifndef PCL_MOMENT_OF_INERTIA_ESTIMATION_HPP_
#define PCL_MOMENT_OF_INERTIA_ESTIMATION_HPP_
+#include <Eigen/Eigenvalues> // for EigenSolver
+
#include <pcl/features/moment_of_inertia_estimation.h>
#include <pcl/features/feature.h>
template <typename PointT> void
pcl::MomentOfInertiaEstimation<PointT>::setInputCloud (const PointCloudConstPtr& cloud)
{
- input_ = cloud;
-
+ pcl::PCLBase<PointT>::setInputCloud (cloud);
is_valid_ = false;
}
template <typename PointT> void
pcl::MomentOfInertiaEstimation<PointT>::setIndices (const IndicesPtr& indices)
{
- indices_ = indices;
- fake_indices_ = false;
- use_indices_ = true;
-
+ pcl::PCLBase<PointT>::setIndices (indices);
is_valid_ = false;
}
template <typename PointT> void
pcl::MomentOfInertiaEstimation<PointT>::setIndices (const IndicesConstPtr& indices)
{
- indices_.reset (new std::vector<int> (*indices));
- fake_indices_ = false;
- use_indices_ = true;
-
+ pcl::PCLBase<PointT>::setIndices (indices);
is_valid_ = false;
}
template <typename PointT> void
pcl::MomentOfInertiaEstimation<PointT>::setIndices (const PointIndicesConstPtr& indices)
{
- indices_.reset (new std::vector<int> (indices->indices));
- fake_indices_ = false;
- use_indices_ = true;
-
+ pcl::PCLBase<PointT>::setIndices (indices);
is_valid_ = false;
}
template <typename PointT> void
pcl::MomentOfInertiaEstimation<PointT>::setIndices (std::size_t row_start, std::size_t col_start, std::size_t nb_rows, std::size_t nb_cols)
{
- if ((nb_rows > input_->height) || (row_start > input_->height))
- {
- PCL_ERROR ("[PCLBase::setIndices] cloud is only %d height", input_->height);
- return;
- }
-
- if ((nb_cols > input_->width) || (col_start > input_->width))
- {
- PCL_ERROR ("[PCLBase::setIndices] cloud is only %d width", input_->width);
- return;
- }
-
- const std::size_t row_end = row_start + nb_rows;
- if (row_end > input_->height)
- {
- PCL_ERROR ("[PCLBase::setIndices] %d is out of rows range %d", row_end, input_->height);
- return;
- }
-
- const std::size_t col_end = col_start + nb_cols;
- if (col_end > input_->width)
- {
- PCL_ERROR ("[PCLBase::setIndices] %d is out of columns range %d", col_end, input_->width);
- return;
- }
-
- indices_.reset (new std::vector<int>);
- indices_->reserve (nb_cols * nb_rows);
- for(std::size_t i = row_start; i < row_end; i++)
- for(std::size_t j = col_start; j < col_end; j++)
- indices_->push_back (static_cast<int> ((i * input_->width) + j));
- fake_indices_ = false;
- use_indices_ = true;
-
+ pcl::PCLBase<PointT>::setIndices (row_start, col_start, nb_rows, nb_cols);
is_valid_ = false;
}
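A hedged usage sketch of the inherited PCLBase::setIndices overloads that the estimator now forwards to (the cloud and window values are placeholders):

  #include <pcl/features/moment_of_inertia_estimation.h>
  #include <pcl/point_types.h>

  void
  computeOnWindow (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &organized_cloud)
  {
    pcl::MomentOfInertiaEstimation<pcl::PointXYZ> estimator;
    estimator.setInputCloud (organized_cloud);
    // Select a 64x64 window of the organized cloud starting at row 10, column 10;
    // the index bookkeeping (fake_indices_, use_indices_) is handled by PCLBase.
    estimator.setIndices (10, 10, 64, 64);
    estimator.compute ();

    pcl::PointXYZ min_pt, max_pt;
    estimator.getAABB (min_pt, max_pt);
  }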
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointSource, typename PointFeature> void
pcl::MultiscaleFeaturePersistence<PointSource, PointFeature>::determinePersistentFeatures (FeatureCloud &output_features,
- shared_ptr<std::vector<int> > &output_indices)
+ pcl::IndicesPtr &output_indices)
{
if (!initCompute ())
return;
{
if (unique_features_table_[scale_i][*feature_it] == true)
{
- output_features.points.push_back ((*features_at_scale_[scale_i])[*feature_it]);
+ output_features.push_back ((*features_at_scale_[scale_i])[*feature_it]);
output_indices->push_back (feature_estimator_->getIndices ()->at (*feature_it));
}
}
if (present_in_all)
{
- output_features.points.emplace_back ((*features_at_scale_.front ())[feature]);
+ output_features.emplace_back ((*features_at_scale_.front ())[feature]);
output_indices->emplace_back (feature_estimator_->getIndices ()->at (feature));
}
}
*/
#include <iostream>
-#include <map>
#include <pcl/common/norms.h> // for L1_Norm
#include <pcl/common/eigen.h>
{
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
output.is_dense = true;
{
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
output.is_dense = true;
return;
}
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
std::vector<float> k_sqr_distances;
tree_->setInputCloud (input_);
- output.points.resize (indices_->size ());
+ output.resize (indices_->size ());
for (std::size_t index_i = 0; index_i < indices_->size (); ++index_i)
{
{
pcl::Label invalid_pt;
invalid_pt.label = unsigned (0);
- labels.points.resize (input_->size (), invalid_pt);
+ labels.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
for (int d_idx = 0; d_idx < num_of_ngbr; d_idx++)
{
int nghr_idx = curr_idx + directions[d_idx].d_index;
- assert (nghr_idx >= 0 && nghr_idx < input_->size ());
+ assert (nghr_idx >= 0 && static_cast<std::size_t>(nghr_idx) < input_->size ());
if (!std::isfinite ((*input_)[nghr_idx].z))
{
found_invalid_neighbor = true;
for (const auto &direction : directions)
{
int nghr_idx = curr_idx + direction.d_index;
- assert (nghr_idx >= 0 && nghr_idx < input_->size ());
+ assert (nghr_idx >= 0 && static_cast<std::size_t>(nghr_idx) < input_->size ());
if (!std::isfinite ((*input_)[nghr_idx].z))
{
dx += direction.d_x;
{
pcl::Label invalid_pt;
invalid_pt.label = unsigned (0);
- labels.points.resize (input_->size (), invalid_pt);
+ labels.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
{
pcl::Label invalid_pt;
invalid_pt.label = unsigned (0);
- labels.points.resize (input_->size (), invalid_pt);
+ labels.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
{
pcl::Label invalid_pt;
invalid_pt.label = unsigned (0);
- labels.points.resize (input_->size (), invalid_pt);
+ labels.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
#include <pcl/features/our_cvfh.h>
#include <pcl/features/vfh.h>
#include <pcl/features/normal_3d.h>
+#include <pcl/common/io.h> // for copyPointCloud
+#include <pcl/common/common.h> // for getMaxDistance
#include <pcl/common/transforms.h>
//////////////////////////////////////////////////////////////////////////////////////////////
if (!Feature<PointInT, PointOutT>::initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
// Resize the output dataset
  // we risk pre-allocating too much memory, which could lead to bad_alloc
// (see http://dev.pointclouds.org/issues/657)
output.width = output.height = 1;
- output.points.resize (1);
+ output.resize (1);
// Perform the actual feature computation
computeFeature (output);
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
for (std::size_t i = 0; i < cloud.size (); ++i)
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT, typename PointNT, typename PointOutT> void
pcl::OURCVFHEstimation<PointInT, PointNT, PointOutT>::filterNormalsWithHighCurvature (const pcl::PointCloud<PointNT> & cloud,
- std::vector<int> &indices_to_use,
- std::vector<int> &indices_out, std::vector<int> &indices_in,
+ pcl::Indices &indices_to_use,
+ pcl::Indices &indices_out, pcl::Indices &indices_in,
float threshold)
{
indices_out.resize (cloud.size ());
std::size_t in, out;
in = out = 0;
- for (const int &index : indices_to_use)
+ for (const auto &index : indices_to_use)
{
if (cloud[index].curvature > threshold)
{
scatter.setZero ();
float sum_w = 0.f;
- for (const int &index : indices.indices)
+ for (const auto &index : indices.indices)
{
Eigen::Vector3f pvector = (*grid)[index].getVector3fMap ();
float d_k = (pvector).norm ();
//copy to the cvfh signature
PointCloudOut vfh_signature;
- vfh_signature.points.resize (1);
+ vfh_signature.resize (1);
vfh_signature.width = vfh_signature.height = 1;
for (int d = 0; d < 308; ++d)
vfh_signature[0].histogram[d] = output[i].histogram[d];
}
}
- if (!ourcvfh_output.points.empty ())
+ if (!ourcvfh_output.empty ())
{
ourcvfh_output.height = 1;
}
{
PCL_ERROR ("[pcl::%s::computeFeature] No input dataset containing normals was given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (normals_->size () != surface_->size ())
{
PCL_ERROR ("[pcl::%s::computeFeature] The number of points in the input dataset differs from the number of points in the dataset containing the normals!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
dominant_normals_.clear ();
// ---[ Step 0: remove normals with high curvature
- std::vector<int> indices_out;
- std::vector<int> indices_in;
+ pcl::Indices indices_out;
+ pcl::Indices indices_in;
filterNormalsWithHighCurvature (*normals_, *indices_, indices_out, indices_in, curv_threshold_);
pcl::PointCloud<pcl::PointNormal>::Ptr normals_filtered_cloud (new pcl::PointCloud<pcl::PointNormal> ());
normals_filtered_cloud->height = 1;
normals_filtered_cloud->points.resize (normals_filtered_cloud->width);
- std::vector<int> indices_from_nfc_to_indices;
+ pcl::Indices indices_from_nfc_to_indices;
indices_from_nfc_to_indices.resize (indices_in.size ());
for (std::size_t i = 0; i < indices_in.size (); ++i)
}
//compute modified VFH for all dominant clusters and add them to the list!
- output.points.resize (dominant_normals_.size ());
+ output.resize (dominant_normals_.size ());
output.width = dominant_normals_.size ();
for (std::size_t i = 0; i < dominant_normals_.size (); ++i)
pcl::PointCloud<pcl::VFHSignature308> vfh_signature;
vfh.compute (vfh_signature);
- output.points.resize (1);
+ output.resize (1);
output.width = 1;
output[0] = vfh_signature[0];
#pragma once
#include <pcl/features/pfh.h>
+#include <pcl/features/pfh_tools.h> // for computePairFeatures
#include <pcl/common/point_tests.h> // for pcl::isFinite
template <typename PointInT, typename PointNT, typename PointOutT> void
pcl::PFHEstimation<PointInT, PointNT, PointOutT>::computePointPFHSignature (
const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices, int nr_split, Eigen::VectorXf &pfh_histogram)
+ const pcl::Indices &indices, int nr_split, Eigen::VectorXf &pfh_histogram)
{
int h_index, h_p;
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
output.is_dense = true;
template <typename PointInT, typename PointNT, typename PointOutT> void
pcl::PFHRGBEstimation<PointInT, PointNT, PointOutT>::computePointPFHRGBSignature (
const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices, int nr_split, Eigen::VectorXf &pfhrgb_histogram)
+ const pcl::Indices &indices, int nr_split, Eigen::VectorXf &pfhrgb_histogram)
{
int h_index, h_p;
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
// Iterating over the entire index vector
#include <pcl/features/ppf.h>
#include <pcl/features/pfh.h>
+#include <pcl/features/pfh_tools.h> // for computePairFeatures
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT>
pcl::PPFEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
{
// Initialize output container - overwrite the sizes done by Feature::initCompute ()
- output.points.resize (indices_->size () * input_->size ());
+ output.resize (indices_->size () * input_->size ());
output.height = 1;
output.width = output.size ();
output.is_dense = true;
pcl::PPFRGBEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
{
// Initialize output container - overwrite the sizes done by Feature::initCompute ()
- output.points.resize (indices_->size () * input_->size ());
+ output.resize (indices_->size () * input_->size ());
output.height = 1;
output.width = output.size ();
output.resize (indices_->size ());
for (std::size_t index_i = 0; index_i < indices_->size (); ++index_i)
{
- int i = (*indices_)[index_i];
- std::vector<int> nn_indices;
+ auto i = (*indices_)[index_i];
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
tree_->radiusSearch (i, static_cast<float> (search_radius_), nn_indices, nn_distances);
average_feature_nn.f1 = average_feature_nn.f2 = average_feature_nn.f3 = average_feature_nn.f4 =
average_feature_nn.r_ratio = average_feature_nn.g_ratio = average_feature_nn.b_ratio = 0.0f;
- for (const int &j : nn_indices)
+ for (const auto &j : nn_indices)
{
if (i != j)
{
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT> void
pcl::PrincipalCurvaturesEstimation<PointInT, PointNT, PointOutT>::computePointPrincipalCurvatures (
- const pcl::PointCloud<PointNT> &normals, int p_idx, const std::vector<int> &indices,
+ const pcl::PointCloud<PointNT> &normals, int p_idx, const pcl::Indices &indices,
float &pcx, float &pcy, float &pcz, float &pc1, float &pc2)
{
EIGEN_ALIGN16 Eigen::Matrix3f I = Eigen::Matrix3f::Identity ();
{
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
output.is_dense = true;
template <typename PointInT, typename GradientT, typename PointOutT> void
pcl::RIFTEstimation<PointInT, GradientT, PointOutT>::computeRIFT (
const PointCloudIn &cloud, const PointCloudGradient &gradient,
- int p_idx, float radius, const std::vector<int> &indices,
+ int p_idx, float radius, const pcl::Indices &indices,
const std::vector<float> &sqr_distances, Eigen::MatrixXf &rift_descriptor)
{
if (indices.empty ())
PCL_ERROR ("[pcl::%s::computeFeature] The search radius must be set before computing the feature!\n",
getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
PCL_ERROR ("[pcl::%s::computeFeature] The number of gradient bins must be greater than zero!\n",
getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (nr_distance_bins_ <= 0)
PCL_ERROR ("[pcl::%s::computeFeature] The number of distance bins must be greater than zero!\n",
getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
{
PCL_ERROR ("[pcl::%s::computeFeature] No input gradient was given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (gradient_->size () != surface_->size ())
PCL_ERROR ("[pcl::%s::computeFeature] ", getClassName ().c_str ());
PCL_ERROR ("The number of points in the input dataset differs from the number of points in the gradient!\n");
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
Eigen::MatrixXf rift_descriptor (nr_distance_bins_, nr_gradient_bins_);
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dist_sqr;
// Iterating over the entire index vector
#include <pcl/features/rops_estimation.h>
#include <array>
+#include <numeric> // for accumulate
+#include <Eigen/Eigenvalues> // for EigenSolver
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT>
{
if (triangles_.empty ())
{
- output.points.clear ();
+ output.clear ();
return;
}
//feature size = number_of_rotations * number_of_axis_to_rotate_around * number_of_projections * number_of_central_moments
unsigned int feature_size = number_of_rotations_ * 3 * 3 * 5;
const auto number_of_points = indices_->size ();
- output.points.clear ();
- output.points.reserve (number_of_points);
+ output.clear ();
+ output.reserve (number_of_points);
for (const auto& idx: *indices_)
{
std::set <unsigned int> local_triangles;
- std::vector <int> local_points;
+ pcl::Indices local_points;
getLocalSurface ((*input_)[idx], local_triangles, local_points);
Eigen::Matrix3f lrf_matrix;
else
invert_norm = 1.0f / norm;
- output.points.emplace_back ();
+ output.emplace_back ();
for (std::size_t i_dim = 0; i_dim < feature_size; i_dim++)
- output.points.back().histogram[i_dim] = feature[i_dim] * invert_norm;
+ output.back().histogram[i_dim] = feature[i_dim] * invert_norm;
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
-pcl::ROPSEstimation <PointInT, PointOutT>::getLocalSurface (const PointInT& point, std::set <unsigned int>& local_triangles, std::vector <int>& local_points) const
+pcl::ROPSEstimation <PointInT, PointOutT>::getLocalSurface (const PointInT& point, std::set <unsigned int>& local_triangles, pcl::Indices& local_points) const
{
std::vector <float> distances;
tree_->radiusSearch (point, support_radius_, local_points, distances);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
-pcl::ROPSEstimation <PointInT, PointOutT>::transformCloud (const PointInT& point, const Eigen::Matrix3f& matrix, const std::vector <int>& local_points, PointCloudIn& transformed_cloud) const
+pcl::ROPSEstimation <PointInT, PointOutT>::transformCloud (const PointInT& point, const Eigen::Matrix3f& matrix, const pcl::Indices& local_points, PointCloudIn& transformed_cloud) const
{
const auto number_of_points = local_points.size ();
- transformed_cloud.points.clear ();
- transformed_cloud.points.reserve (number_of_points);
+ transformed_cloud.clear ();
+ transformed_cloud.reserve (number_of_points);
for (const auto& idx: local_points)
{
new_point.x = transformed_point (0);
new_point.y = transformed_point (1);
new_point.z = transformed_point (2);
- transformed_cloud.points.emplace_back (new_point);
+ transformed_cloud.emplace_back (new_point);
}
}
rotated_cloud.header = cloud.header;
rotated_cloud.width = number_of_points;
rotated_cloud.height = 1;
- rotated_cloud.points.clear ();
- rotated_cloud.points.reserve (number_of_points);
+ rotated_cloud.clear ();
+ rotated_cloud.reserve (number_of_points);
min (0) = std::numeric_limits <float>::max ();
min (1) = std::numeric_limits <float>::max ();
rotated_point.x = point (0);
rotated_point.y = point (1);
rotated_point.z = point (2);
- rotated_cloud.points.emplace_back (rotated_point);
+ rotated_cloud.emplace_back (rotated_point);
for (int i = 0; i < 3; ++i)
{
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT> Eigen::MatrixXf
pcl::computeRSD (const pcl::PointCloud<PointInT> &surface, const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices, double max_dist,
+ const pcl::Indices &indices, double max_dist,
int nr_subdiv, double plane_radius, PointOutT &radii, bool compute_histogram)
{
// Check if the full histogram has to be saved or not
min_max_angle_by_dist[0][0] = min_max_angle_by_dist[0][1] = 0.0;
// Compute distance by normal angle distribution for points
- std::vector<int>::const_iterator i, begin (indices.begin()), end (indices.end());
+ pcl::Indices::const_iterator i, begin (indices.begin()), end (indices.end());
for (i = begin+1; i != end; ++i)
{
// compute angle between the two lines going through normals (disregard orientation!)
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT, typename PointOutT> Eigen::MatrixXf
pcl::computeRSD (const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices, const std::vector<float> &sqr_dists, double max_dist,
+ const pcl::Indices &indices, const std::vector<float> &sqr_dists, double max_dist,
int nr_subdiv, double plane_radius, PointOutT &radii, bool compute_histogram)
{
// Check if the full histogram has to be saved or not
}
// Compute distance by normal angle distribution for points
- std::vector<int>::const_iterator i, begin (indices.begin()), end (indices.end());
+ pcl::Indices::const_iterator i, begin (indices.begin()), end (indices.end());
for (i = begin+1; i != end; ++i)
{
// compute angle between the two lines going through normals (disregard orientation!)
{
PCL_ERROR ("[pcl::%s::computeFeature] A search radius needs to be set!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
// List of indices and corresponding squared distances for a neighborhood
// \note resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_sqr_dists;
// Check if the full histogram has to be saved or not
#include <pcl/features/shot.h>
#include <pcl/features/shot_lrf.h>
+#include <pcl/common/colors.h> // for RGB2sRGB_LUT, XYZ2LAB_LUT
+
// Useful constants.
#define PST_PI 3.1415926535897932384626433832795
#define PST_RAD_45 0.78539816339744830961566084581988
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> float
-pcl::SHOTColorEstimation<PointInT, PointNT, PointOutT, PointRFT>::sRGB_LUT[256] = {- 1};
+template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT>
+std::array<float, 256>
+ pcl::SHOTColorEstimation<PointInT, PointNT, PointOutT, PointRFT>::sRGB_LUT = pcl::RGB2sRGB_LUT<float, 8>();
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> float
-pcl::SHOTColorEstimation<PointInT, PointNT, PointOutT, PointRFT>::sXYZ_LUT[4000] = {- 1};
+template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT>
+std::array<float, 4000>
+ pcl::SHOTColorEstimation<PointInT, PointNT, PointOutT, PointRFT>::sXYZ_LUT = pcl::XYZ2LAB_LUT<float, 4000>();
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
unsigned char B, float &L, float &A,
float &B2)
{
- // @TODO: C++17 supports constexpr lambda for compile time init
- if (sRGB_LUT[0] < 0)
- {
- for (int i = 0; i < 256; i++)
- {
- float f = static_cast<float> (i) / 255.0f;
- if (f > 0.04045)
- sRGB_LUT[i] = powf ((f + 0.055f) / 1.055f, 2.4f);
- else
- sRGB_LUT[i] = f / 12.92f;
- }
-
- for (int i = 0; i < 4000; i++)
- {
- float f = static_cast<float> (i) / 4000.0f;
- if (f > 0.008856)
- sXYZ_LUT[i] = static_cast<float> (powf (f, 0.3333f));
- else
- sXYZ_LUT[i] = static_cast<float>((7.787 * f) + (16.0 / 116.0));
- }
- }
-
float fr = sRGB_LUT[R];
float fg = sRGB_LUT[G];
float fb = sRGB_LUT[B];
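A generic sketch of the initialisation pattern used for the colour LUTs above, built from the formula in the deleted lazy-init block (the names here are illustrative, not PCL API):

  #include <array>
  #include <cmath>

  // Build the sRGB-to-linear lookup table once, at static-initialisation time,
  // instead of filling a raw array lazily on the first call.
  static std::array<float, 256>
  makeSRGBToLinearLUT ()
  {
    std::array<float, 256> lut{};
    for (int i = 0; i < 256; ++i)
    {
      const float f = static_cast<float> (i) / 255.0f;
      lut[i] = (f > 0.04045f) ? std::pow ((f + 0.055f) / 1.055f, 2.4f)
                              : f / 12.92f;
    }
    return lut;
  }

  static const std::array<float, 256> sRGB_to_linear = makeSRGBToLinearLUT ();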
template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
pcl::SHOTEstimationBase<PointInT, PointNT, PointOutT, PointRFT>::createBinDistanceShape (
int index,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
std::vector<double> &bin_distance_shape)
{
bin_distance_shape.resize (indices.size ());
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
pcl::SHOTEstimationBase<PointInT, PointNT, PointOutT, PointRFT>::interpolateSingleChannel (
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const std::vector<float> &sqr_dists,
const int index,
std::vector<double> &binDistance,
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
pcl::SHOTColorEstimation<PointInT, PointNT, PointOutT, PointRFT>::interpolateDoubleChannel (
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const std::vector<float> &sqr_dists,
const int index,
std::vector<double> &binDistanceShape,
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
pcl::SHOTColorEstimation<PointInT, PointNT, PointOutT, PointRFT>::computePointSHOT (
- const int index, const std::vector<int> &indices, const std::vector<float> &sqr_dists, Eigen::VectorXf &shot)
+ const int index, const pcl::Indices &indices, const std::vector<float> &sqr_dists, Eigen::VectorXf &shot)
{
// Clear the resultant shot
shot.setZero ();
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
pcl::SHOTEstimation<PointInT, PointNT, PointOutT, PointRFT>::computePointSHOT (
- const int index, const std::vector<int> &indices, const std::vector<float> &sqr_dists, Eigen::VectorXf &shot)
+ const int index, const pcl::Indices &indices, const std::vector<float> &sqr_dists, Eigen::VectorXf &shot)
{
//Skip the current feature if the number of its neighbors is not sufficient for its description
if (indices.size () < 5)
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
output.is_dense = true;
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
output.is_dense = true;
#ifndef PCL_FEATURES_IMPL_SHOT_LRF_H_
#define PCL_FEATURES_IMPL_SHOT_LRF_H_
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
#include <utility>
#include <pcl/features/shot_lrf.h>
pcl::SHOTLocalReferenceFrameEstimation<PointInT, PointOutT>::getLocalRF (const int& current_point_idx, Eigen::Matrix3f &rf)
{
const Eigen::Vector4f& central_point = (*input_)[current_point_idx].getVector4fMap ();
- std::vector<int> n_indices;
+ pcl::Indices n_indices;
std::vector<float> n_sqr_distances;
this->searchForNeighbors (current_point_idx, search_parameter_, n_indices, n_sqr_distances);
#ifndef PCL_FEATURES_IMPL_SHOT_LRF_OMP_H_
#define PCL_FEATURES_IMPL_SHOT_LRF_OMP_H_
-#include <utility>
#include <pcl/features/shot_lrf_omp.h>
#include <pcl/features/shot_lrf.h>
//output_rf.confidence = getLocalRF ((*indices_)[i], rf);
//if (output_rf.confidence == std::numeric_limits<float>::max ())
- std::vector<int> n_indices;
+ pcl::Indices n_indices;
std::vector<float> n_sqr_distances;
this->searchForNeighbors ((*indices_)[i], search_parameter_, n_indices, n_sqr_distances);
if (getLocalRF ((*indices_)[i], rf) == std::numeric_limits<float>::max ())
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
if (!isFinite ((*input_)[(*indices_)[idx]]) || lrf_is_nan || this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices,
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices (k_);
+ pcl::Indices nn_indices (k_);
std::vector<float> nn_dists (k_);
bool lrf_is_nan = false;
else
bin_size = search_radius_ / image_width_ / sqrt(2.0);
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_sqr_dists;
const int neighb_cnt = this->searchForNeighbors (index, search_radius_, nn_indices, nn_sqr_dists);
if (neighb_cnt < static_cast<int> (min_pts_neighb_))
#include <pcl/features/statistical_multiscale_interest_region_extraction.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/common/distances.h>
-#include <pcl/features/boost.h>
+#include <pcl/console/print.h> // for PCL_INFO, PCL_ERROR
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/johnson_all_pairs_shortest.hpp>
for (std::size_t point_i = 0; point_i < input_->size (); ++point_i)
{
- std::vector<int> k_indices (16);
+ pcl::Indices k_indices (16);
std::vector<float> k_distances (16);
kdtree.nearestKSearch (static_cast<int> (point_i), 16, k_indices, k_distances);
std::vector<int> nn_indices;
geodesicFixedRadiusSearch (point_i, scale_values_[scale_i], nn_indices);
bool is_max_point = true, is_min_point = true;
- for (const int &nn_index : nn_indices)
+ for (const auto &nn_index : nn_indices)
if (F_scales_[scale_i][point_i] < F_scales_[scale_i][nn_index])
is_max_point = false;
else
(is_max[scale_i - 1][point_i] && is_max[scale_i][point_i] && is_max[scale_i + 1][point_i]))
{
// add the point to the result vector
- IndicesPtr region (new std::vector<int>);
+ IndicesPtr region (new pcl::Indices);
region->push_back (static_cast<int> (point_i));
// and also add its scale-sized geodesic neighborhood
#pragma once
+#include <numeric> // for partial_sum
#include <pcl/features/usc.h>
#include <pcl/features/shot_lrf.h>
#include <pcl/common/angles.h>
(*frames_)[index].z_axis[2]);
// Find every point within specified search_radius_
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
const std::size_t neighb_cnt = searchForNeighbors ((*indices_)[index], search_radius_, nn_indices, nn_dists);
// For each point within radius
const auto l = std::distance(phi_divisions_.cbegin (), std::prev(phi_min));
/// Local point density = number of points in a sphere of radius "point_density_radius_" around the current neighbour
- std::vector<int> neighbour_indices;
+ pcl::Indices neighbour_indices;
 std::vector<float> neighbour_distances;
 float point_density = static_cast<float> (searchForNeighbors (*surface_, nn_indices[ne], point_density_radius_, neighbour_indices, neighbour_distances));
/// point_density is always bigger than 0 because FindPointsWithinRadius returns at least the point itself
if (!initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
// Copy the header
// (see http://dev.pointclouds.org/issues/657)
output.width = output.height = 1;
output.is_dense = input_->is_dense;
- output.points.resize (1);
+ output.resize (1);
// Perform the actual feature computation
computeFeature (output);
 const Eigen::Vector4f &centroid_n,
const pcl::PointCloud<PointInT> &cloud,
const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices)
+ const pcl::Indices &indices)
{
Eigen::Vector4f pfh_tuple;
// Reset the whole thing
hist_incr_size_component = hist_incr;
// Iterate over all the points in the neighborhood
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// Compute the pair P to NNi
if (!computePairFeatures (centroid_p, centroid_n, cloud[index].getVector4fMap (),
}
// We only output _1_ signature
- output.points.resize (1);
+ output.resize (1);
output.width = 1;
output.height = 1;
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
#include <pcl/features/feature.h>
#include <pcl/features/integral_image2D.h>
*/
void
computePointIntensityGradient (const pcl::PointCloud<PointInT> &cloud,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const Eigen::Vector3f &point,
float mean_intensity,
const Eigen::Vector3f &normal,
void
computeIntensitySpinImage (const PointCloudIn &cloud,
float radius, float sigma, int k,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const std::vector<float> &squared_distances,
Eigen::MatrixXf &intensity_spin_image);
#pragma once
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
#include <pcl/features/feature.h>
namespace pcl
*/
void
computePointMomentInvariants (const pcl::PointCloud<PointInT> &cloud,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
float &j1, float &j2, float &j3);
/** \brief Compute the 3 moment invariants (j1, j2, j3) for a given set of points, using their indices.
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/pcl_base.h>
-#include <pcl/PointIndices.h>
namespace pcl
{
*/
void
determinePersistentFeatures (FeatureCloud &output_features,
- shared_ptr<std::vector<int> > &output_indices);
+ pcl::IndicesPtr &output_indices);
/** \brief Method for setting the scale parameters for the algorithm
* \param scale_values vector of scales to determine the characteristic of each scaling step
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/features/eigen.h>
#include <pcl/point_cloud.h>
#include <pcl/point_representation.h>
// =====CONSTRUCTOR & DESTRUCTOR=====
/** Constructor */
- NarfDescriptor (const RangeImage* range_image=nullptr, const std::vector<int>* indices=nullptr);
+ NarfDescriptor (const RangeImage* range_image=nullptr, const pcl::Indices* indices=nullptr);
/** Destructor */
~NarfDescriptor();
// =====METHODS=====
//! Set input data
void
- setRangeImage (const RangeImage* range_image, const std::vector<int>* indices=nullptr);
+ setRangeImage (const RangeImage* range_image, const pcl::Indices* indices=nullptr);
//! Overwrite the compute function of the base class
void
* \ingroup features
*/
template <typename PointT> inline bool
- computePointNormal (const pcl::PointCloud<PointT> &cloud, const std::vector<int> &indices,
+ computePointNormal (const pcl::PointCloud<PointT> &cloud, const pcl::Indices &indices,
Eigen::Vector4f &plane_parameters, float &curvature)
{
// Placeholder for the 3x3 covariance matrix at each surface patch
*/
template<typename PointNT> inline bool
flipNormalTowardsNormalsMean ( pcl::PointCloud<PointNT> const &normal_cloud,
- std::vector<int> const &normal_indices,
+ pcl::Indices const &normal_indices,
Eigen::Vector3f &normal)
{
Eigen::Vector3f normal_mean = Eigen::Vector3f::Zero ();
- for (const int &normal_index : normal_indices)
+ for (const auto &normal_index : normal_indices)
{
const PointNT& cur_pt = normal_cloud[normal_index];
* \f]
*/
inline bool
- computePointNormal (const pcl::PointCloud<PointInT> &cloud, const std::vector<int> &indices,
+ computePointNormal (const pcl::PointCloud<PointInT> &cloud, const pcl::Indices &indices,
Eigen::Vector4f &plane_parameters, float &curvature)
{
if (indices.size () < 3 ||
* \f]
*/
inline bool
- computePointNormal (const pcl::PointCloud<PointInT> &cloud, const std::vector<int> &indices,
+ computePointNormal (const pcl::PointCloud<PointInT> &cloud, const pcl::Indices &indices,
float &nx, float &ny, float &nz, float &curvature)
{
if (indices.size () < 3 ||
#pragma once
#include <pcl/features/feature.h>
-#include <pcl/search/pcl_search.h>
-#include <pcl/common/common.h>
namespace pcl
{
* \param[in] threshold threshold value for curvature
*/
void
- filterNormalsWithHighCurvature (const pcl::PointCloud<PointNT> & cloud, std::vector<int> & indices_to_use, std::vector<int> &indices_out,
- std::vector<int> &indices_in, float threshold);
+ filterNormalsWithHighCurvature (const pcl::PointCloud<PointNT> & cloud, pcl::Indices & indices_to_use, pcl::Indices &indices_out,
+ pcl::Indices &indices_in, float threshold);
/** \brief Set the viewpoint.
* \param[in] vpx the X coordinate of the viewpoint
#include <pcl/point_types.h>
#include <pcl/features/feature.h>
-#include <pcl/features/pfh_tools.h>
#include <map>
+#include <queue> // for std::queue
namespace pcl
{
*/
void
computePointPFHSignature (const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices, int nr_split, Eigen::VectorXf &pfh_histogram);
+ const pcl::Indices &indices, int nr_split, Eigen::VectorXf &pfh_histogram);
protected:
/** \brief Estimate the Point Feature Histograms (PFH) descriptors at a set of points given by
#pragma once
#include <pcl/features/feature.h>
-#include <pcl/features/pfh_tools.h>
namespace pcl
{
void
computePointPFHRGBSignature (const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices, int nr_split, Eigen::VectorXf &pfhrgb_histogram);
+ const pcl::Indices &indices, int nr_split, Eigen::VectorXf &pfhrgb_histogram);
protected:
void
#pragma once
#include <pcl/features/feature.h>
-#include <pcl/features/boost.h>
namespace pcl
{
#pragma once
-#include <pcl/features/eigen.h>
#include <pcl/features/feature.h>
namespace pcl
*/
void
computePointPrincipalCurvatures (const pcl::PointCloud<PointNT> &normals,
- int p_idx, const std::vector<int> &indices,
+ int p_idx, const pcl::Indices &indices,
float &pcx, float &pcy, float &pcz, float &pc1, float &pc2);
protected:
*/
void
computeRIFT (const PointCloudIn &cloud, const PointCloudGradient &gradient, int p_idx, float radius,
- const std::vector<int> &indices, const std::vector<float> &squared_distances,
+ const pcl::Indices &indices, const std::vector<float> &squared_distances,
Eigen::MatrixXf &rift_descriptor);
protected:
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
+#include <pcl/Vertices.h> // for Vertices
#include <pcl/features/feature.h>
#include <set>
* \param[out] local_points stores the indices of the points that belong to the local surface
*/
void
- getLocalSurface (const PointInT& point, std::set <unsigned int>& local_triangles, std::vector <int>& local_points) const;
+ getLocalSurface (const PointInT& point, std::set <unsigned int>& local_triangles, pcl::Indices& local_points) const;
/** \brief This method computes LRF (Local Reference Frame) matrix for the given point.
* \param[in] point point for which the LRF is computed
* \param[out] transformed_cloud stores the transformed cloud
*/
void
- transformCloud (const PointInT& point, const Eigen::Matrix3f& matrix, const std::vector <int>& local_points, PointCloudIn& transformed_cloud) const;
+ transformCloud (const PointInT& point, const Eigen::Matrix3f& matrix, const pcl::Indices& local_points, PointCloudIn& transformed_cloud) const;
/** \brief This method rotates the cloud around the given axis and computes AABB of the rotated cloud.
* \param[in] axis axis around which cloud must be rotated
template <int N> void
getFeaturePointCloud (const std::vector<Eigen::MatrixXf, Eigen::aligned_allocator<Eigen::MatrixXf> > &histograms2D, PointCloud<Histogram<N> > &histogramsPC)
{
- histogramsPC.points.resize (histograms2D.size ());
+ histogramsPC.resize (histograms2D.size ());
histogramsPC.width = histograms2D.size ();
histogramsPC.height = 1;
histogramsPC.is_dense = true;
const int rows = histograms2D.at(0).rows();
const int cols = histograms2D.at(0).cols();
- typename PointCloud<Histogram<N> >::VectorType::iterator it = histogramsPC.points.begin ();
+ typename PointCloud<Histogram<N> >::VectorType::iterator it = histogramsPC.begin ();
for (const Eigen::MatrixXf& h : histograms2D)
{
Eigen::Map<Eigen::MatrixXf> histogram (&(it->histogram[0]), rows, cols);
*/
template <typename PointInT, typename PointNT, typename PointOutT> Eigen::MatrixXf
computeRSD (const pcl::PointCloud<PointInT> &surface, const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices, double max_dist,
+ const pcl::Indices &indices, double max_dist,
int nr_subdiv, double plane_radius, PointOutT &radii, bool compute_histogram = false);
- template <typename PointInT, typename PointNT, typename PointOutT>
- PCL_DEPRECATED(1, 12, "use computeRSD() overload that takes input point clouds by const reference")
- Eigen::MatrixXf
- computeRSD (typename pcl::PointCloud<PointInT>::ConstPtr &surface, typename pcl::PointCloud<PointNT>::ConstPtr &normals,
- const std::vector<int> &indices, double max_dist,
- int nr_subdiv, double plane_radius, PointOutT &radii, bool compute_histogram = false)
- {
- return computeRSD (*surface, *normals, indices, max_dist, nr_subdiv, plane_radius, radii, compute_histogram);
- }
-
/** \brief Estimate the Radius-based Surface Descriptor (RSD) for a given point based on its spatial neighborhood of 3D points with normals
* \param[in] normals the dataset containing the surface normals at each point in the dataset
* \param[in] indices the neighborhood point indices in the dataset (first point is used as the reference)
*/
template <typename PointNT, typename PointOutT> Eigen::MatrixXf
computeRSD (const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices, const std::vector<float> &sqr_dists, double max_dist,
+ const pcl::Indices &indices, const std::vector<float> &sqr_dists, double max_dist,
int nr_subdiv, double plane_radius, PointOutT &radii, bool compute_histogram = false);
- template <typename PointNT, typename PointOutT>
- PCL_DEPRECATED(1, 12, "use computeRSD() overload that takes input point cloud by const reference")
- Eigen::MatrixXf
- computeRSD (typename pcl::PointCloud<PointNT>::ConstPtr &normals,
- const std::vector<int> &indices, const std::vector<float> &sqr_dists, double max_dist,
- int nr_subdiv, double plane_radius, PointOutT &radii, bool compute_histogram = false)
- {
- return computeRSD (*normals, indices, sqr_dists, max_dist, nr_subdiv, plane_radius, radii, compute_histogram);
- }
-
/** \brief @b RSDEstimation estimates the Radius-based Surface Descriptor (minimal and maximal radius of the local surface's curves)
* for a given point cloud dataset containing points and normals.
*
#include <pcl/point_types.h>
#include <pcl/features/feature.h>
+#include <array> // for sRGB_LUT, sXYZ_LUT
+
namespace pcl
{
/** \brief SHOTEstimation estimates the Signature of Histograms of OrienTations (SHOT) descriptor for
{
feature_name_ = "SHOTEstimation";
};
-
public:
*/
virtual void
computePointSHOT (const int index,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const std::vector<float> &sqr_dists,
Eigen::VectorXf &shot) = 0;
* \param[out] shot the resultant SHOT histogram
*/
void
- interpolateSingleChannel (const std::vector<int> &indices,
+ interpolateSingleChannel (const pcl::Indices &indices,
const std::vector<float> &sqr_dists,
const int index,
std::vector<double> &binDistance,
* \param[out] bin_distance_shape the resultant histogram
*/
void
- createBinDistanceShape (int index, const std::vector<int> &indices,
+ createBinDistanceShape (int index, const pcl::Indices &indices,
std::vector<double> &bin_distance_shape);
/** \brief The number of bins in each shape histogram. */
*/
void
computePointSHOT (const int index,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const std::vector<float> &sqr_dists,
Eigen::VectorXf &shot) override;
protected:
*/
void
computePointSHOT (const int index,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const std::vector<float> &sqr_dists,
Eigen::VectorXf &shot) override;
protected:
* \param[out] shot the resultant SHOT histogram
*/
void
- interpolateDoubleChannel (const std::vector<int> &indices,
+ interpolateDoubleChannel (const pcl::Indices &indices,
const std::vector<float> &sqr_dists,
const int index,
std::vector<double> &binDistanceShape,
static void
RGB2CIELAB (unsigned char R, unsigned char G, unsigned char B, float &L, float &A, float &B2);
- static float sRGB_LUT[256];
- static float sXYZ_LUT[4000];
+ static std::array<float, 256> sRGB_LUT;
+ static std::array<float, 4000> sXYZ_LUT;
};
}
class StatisticalMultiscaleInterestRegionExtraction : public PCLBase<PointT>
{
public:
- using IndicesPtr = shared_ptr<std::vector<int> >;
+ using IndicesPtr = shared_ptr<pcl::Indices >;
using Ptr = shared_ptr<StatisticalMultiscaleInterestRegionExtraction<PointT> >;
using ConstPtr = shared_ptr<const StatisticalMultiscaleInterestRegionExtraction<PointT> >;
void
 computePointSPFHSignature (const Eigen::Vector4f &centroid_p, const Eigen::Vector4f &centroid_n,
const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals,
- const std::vector<int> &indices);
+ const pcl::Indices &indices);
/** \brief Set the viewpoint.
* \param[in] vpx the X coordinate of the viewpoint
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-NarfDescriptor::NarfDescriptor (const RangeImage* range_image, const std::vector<int>* indices) :
+NarfDescriptor::NarfDescriptor (const RangeImage* range_image, const pcl::Indices* indices) :
range_image_ ()
{
setRangeImage (range_image, indices);
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
-NarfDescriptor::setRangeImage (const RangeImage* range_image, const std::vector<int>* indices)
+NarfDescriptor::setRangeImage (const RangeImage* range_image, const pcl::Indices* indices)
{
range_image_ = range_image;
if (indices != nullptr)
{
- IndicesPtr indicesptr (new std::vector<int> (*indices));
+ IndicesPtr indicesptr (new pcl::Indices (*indices));
setIndices (indicesptr);
}
}
{
//std::cout << __PRETTY_FUNCTION__ << " called.\n";
- output.points.clear();
+ output.clear();
if (range_image_==nullptr)
{
<< ": RangeImage is not set. Sorry, the NARF descriptor calculation works on range images, not on normal point clouds."
<< " Use setRangeImage(...).\n\n";
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (parameters_.support_size <= 0.0f)
std::cerr << __PRETTY_FUNCTION__
<< ": support size is not set. Use getParameters().support_size = ...\n\n";
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
std::vector<Narf*> feature_list;
}
// Copy to NARF36 struct
- output.points.resize(feature_list.size());
+ output.resize(feature_list.size());
for (std::size_t i = 0; i < feature_list.size(); ++i)
{
feature_list[i]->copyToNarf36(output[i]);
void
RangeImageBorderExtractor::computeFeature (PointCloudOut& output)
{
- output.points.clear();
+ output.clear();
if (indices_)
{
std::cerr << __PRETTY_FUNCTION__
<< ": Sorry, usage of indices for the extraction is not supported for range image border extraction (yet).\n\n";
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
<< ": RangeImage is not set. Sorry, the border extraction works on range images, not on normal point clouds."
<< " Use setRangeImage(...).\n\n";
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
output = getBorderDescriptions ();
"include/pcl/${SUBSYS_NAME}/extract_indices.h"
"include/pcl/${SUBSYS_NAME}/filter.h"
"include/pcl/${SUBSYS_NAME}/filter_indices.h"
- "include/pcl/${SUBSYS_NAME}/experimental/functor_filter.h"
"include/pcl/${SUBSYS_NAME}/passthrough.h"
"include/pcl/${SUBSYS_NAME}/shadowpoints.h"
"include/pcl/${SUBSYS_NAME}/project_inliers.h"
"include/pcl/${SUBSYS_NAME}/local_maximum.h"
"include/pcl/${SUBSYS_NAME}/model_outlier_removal.h"
)
+set(experimental_incs
+ "include/pcl/${SUBSYS_NAME}/experimental/functor_filter.h"
+)
set(impl_incs
"include/pcl/${SUBSYS_NAME}/impl/conditional_removal.hpp"
# Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
+PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/experimental" ${experimental_incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl" ${impl_incs})
#pragma once
-#include <pcl/filters/boost.h>
#include <pcl/filters/filter.h>
namespace pcl
#pragma once
#include <pcl/filters/filter.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search
namespace pcl
{
* \return the intensity average at a given point index
*/
double
- computePointWeight (const int pid, const std::vector<int> &indices, const std::vector<float> &distances);
+ computePointWeight (const int pid, const Indices &indices, const std::vector<float> &distances);
/** \brief Set the half size of the Gaussian bilateral filter window.
* \param[in] sigma_s the half size of the Gaussian bilateral filter window to use
#ifdef __GNUC__
#pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
// Marking all Boost headers as system headers to remove warnings
#include <boost/random.hpp>
clipPlanarPolygon3D (const std::vector<PointT, Eigen::aligned_allocator<PointT> >& polygon, std::vector<PointT, Eigen::aligned_allocator<PointT> >& clipped_polygon) const override;
void
- clipPointCloud3D (const pcl::PointCloud<PointT> &cloud_in, std::vector<int>& clipped, const std::vector<int>& indices = std::vector<int> ()) const override;
+ clipPointCloud3D (const pcl::PointCloud<PointT> &cloud_in, Indices& clipped, const Indices& indices = Indices ()) const override;
Clipper3D<PointT>*
clone () const override;
* \return list of indices of remaining points after clipping.
*/
virtual void
- clipPointCloud3D (const pcl::PointCloud<PointT> &cloud_in, std::vector<int>& clipped, const std::vector<int>& indices = std::vector<int> ()) const = 0;
+ clipPointCloud3D (const pcl::PointCloud<PointT> &cloud_in, Indices& clipped, const Indices& indices = Indices ()) const = 0;
/**
* \brief polymorphic method to clone the underlying clipper with its parameters.
#pragma once
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
-#include <pcl/common/eigen.h>
+#include <pcl/pcl_config.h> // for PCL_NO_PRECOMPILE
#include <pcl/filters/filter.h>
namespace pcl
#pragma once
#include <pcl/pcl_base.h>
-#include <pcl/filters/boost.h>
-#include <pcl/search/pcl_search.h>
namespace pcl
{
* \return the convolved point
*/
virtual PointOutT
- operator() (const std::vector<int>& indices, const std::vector<float>& distances) = 0;
+ operator() (const Indices& indices, const std::vector<float>& distances) = 0;
/** \brief Must call this method before doing any computation
* \note make sure to override this with at least
bool initCompute ();
virtual PointOutT
- operator() (const std::vector<int>& indices, const std::vector<float>& distances);
+ operator() (const Indices& indices, const std::vector<float>& distances);
protected:
float sigma_;
~GaussianKernelRGB () {}
PointOutT
- operator() (const std::vector<int>& indices, const std::vector<float>& distances);
+ operator() (const Indices& indices, const std::vector<float>& distances);
};
/** Convolution3D handles the non organized case where width and height are unknown or if you
* \param[out] indices the resultant point cloud indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
static bool
sort_dot_list_function (std::pair<int, double> a,
#pragma once
-#include <pcl/point_types.h>
#include <pcl/filters/filter_indices.h>
-#include <pcl/common/transforms.h>
-#include <pcl/common/eigen.h>
namespace pcl
{
* \param[out] indices the resultant point cloud indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
private:
/** \brief The minimum point of the box. */
Eigen::Vector4f min_pt_;
* \param indices the resultant point cloud indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
/** \brief The minimum point of the box. */
Eigen::Vector4f min_pt_;
#pragma once
-#include <pcl/point_types.h>
#include <pcl/Vertices.h>
#include <pcl/filters/filter_indices.h>
* \param[out] indices the indices of the set of points that passed the filter.
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
private:
/** \brief Return the size of the hull point cloud in line with coordinate axes.
* 2D polygon filter.
*/
template<unsigned PlaneDim1, unsigned PlaneDim2> void
- applyFilter2D (std::vector<int> &indices);
+ applyFilter2D (Indices &indices);
/** \brief Apply the three-dimensional hull filter.
* Polygon-ray crossings are used for three rays cast from each point
* polygon hull filter.
*/
void
- applyFilter3D (std::vector<int> &indices);
+ applyFilter3D (Indices &indices);
/** \brief Test an individual point against a 2D polygon.
* PlaneDim1 and PlaneDim2 specify the x/y/z coordinate axes to use.
namespace pcl {
namespace experimental {
+/**
+ * \brief Checks if the function object meets the requirements for use in the `FunctorFilter` class
+ * \details `Function` needs to be callable with a const reference to a PointCloud
+ * and an index value. The return type should be implicitly convertible to a boolean
+ */
template <typename PointT, typename Function>
-constexpr static bool is_functor_for_filter_v =
- pcl::is_invocable_r_v<bool,
- Function,
- const pcl::remove_cvref_t<pcl::PointCloud<PointT>>&,
- pcl::index_t>;
+constexpr static bool is_function_object_for_filter_v =
+ is_invocable_r_v<bool, Function, const PointCloud<PointT>&, index_t>;
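
A minimal sketch of a callable that satisfies this trait; the point type (pcl::PointXYZ), the lambda, and its threshold are only illustrative:

    #include <pcl/filters/experimental/functor_filter.h>
    #include <pcl/point_types.h>

    // Any callable taking (const PointCloud<PointT>&, index_t) and returning
    // something convertible to bool qualifies, e.g. this lambda:
    const auto keep_positive_x = [](const pcl::PointCloud<pcl::PointXYZ>& cloud,
                                    pcl::index_t idx) { return cloud[idx].x > 0.0f; };

    static_assert(pcl::experimental::is_function_object_for_filter_v<
                      pcl::PointXYZ, decltype(keep_positive_x)>,
                  "callable can be used with FunctorFilter");
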
+namespace advanced {
/**
- * \brief Filter point clouds and indices based on a functor passed in the ctor
+ * \brief Filter point clouds and indices based on a function object passed in the ctor
+ * \details The function object can be anything (lambda, std::function, invocable class,
+ * etc.) that can be moved into the class. Additionally, it must satisfy the condition
+ * `is_function_object_for_filter_v`
* \ingroup filters
*/
-template <typename PointT, typename Functor>
+template <typename PointT, typename FunctionObject>
class FunctorFilter : public FilterIndices<PointT> {
using Base = FilterIndices<PointT>;
- using PCLBase = pcl::PCLBase<PointT>;
+ using PCL_Base = PCLBase<PointT>;
public:
- using FunctorT = Functor;
+ using FunctionObjectT = FunctionObject;
// using in type would complicate signature
- static_assert(is_functor_for_filter_v<PointT, FunctorT>,
- "Functor signature must be similar to `bool(const PointCloud<PointT>&, "
- "index_t)`");
+ static_assert(is_function_object_for_filter_v<PointT, FunctionObjectT>,
+ "Function object signature must be similar to `bool(const "
+ "PointCloud<PointT>&, index_t)`");
protected:
using Base::extract_removed_indices_;
using Base::filter_name_;
using Base::negative_;
using Base::removed_indices_;
- using PCLBase::indices_;
- using PCLBase::input_;
+ using PCL_Base::indices_;
+ using PCL_Base::input_;
- // need to hold a value because functors can only be copy or move constructed
- FunctorT functor_;
+private:
+ // need to hold a value because lambdas can only be copy or move constructed in C++14
+ FunctionObjectT functionObject_;
public:
/** \brief Constructor.
+   * \param[in] function_object Callable used for filtering; the indices for which it
+   * returns false are filtered out
* \param[in] extract_removed_indices Set to true if you want to be able to
* extract the indices of points being removed (default = false).
*/
- FunctorFilter(FunctorT functor, bool extract_removed_indices = false)
- : Base(extract_removed_indices), functor_(std::move(functor))
+ FunctorFilter(FunctionObjectT function_object, bool extract_removed_indices = false)
+ : Base(extract_removed_indices), functionObject_(std::move(function_object))
{
filter_name_ = "functor_filter";
}
- const FunctorT&
- getFunctor() const noexcept
+ const FunctionObjectT&
+ getFunctionObject() const noexcept
{
- return functor_;
+ return functionObject_;
}
- FunctorT&
- getFunctor() noexcept
+ FunctionObjectT&
+ getFunctionObject() noexcept
{
- return functor_;
+ return functionObject_;
}
/**
applyFilter(Indices& indices) override
{
indices.clear();
- indices.reserve(input_->size());
+ indices.reserve(indices_->size());
if (extract_removed_indices_) {
removed_indices_->clear();
- removed_indices_->reserve(input_->size());
+ removed_indices_->reserve(indices_->size());
}
for (const auto index : *indices_) {
- // functor returns true for points that should be selected
- if (negative_ != functor_(*input_, index)) {
+ // function object returns true for points that should be selected
+ if (negative_ != functionObject_(*input_, index)) {
indices.push_back(index);
}
else if (extract_removed_indices_) {
}
}
}
+
+protected:
+ /**
+   * \brief Constructor to be used by derived classes that provide the function object later (e.g. a member function)
+ * \param[in] extract_removed_indices Set to true if you want to be able to
+ * extract the indices of points being removed (default = false).
+   * \note The class is ill-defined until `setFunctionObject` has been called.
+   * Do not call any filter routine until then.
+ */
+ FunctorFilter(bool extract_removed_indices = false) : Base(extract_removed_indices)
+ {
+ filter_name_ = "functor_filter";
+ }
+
+ /**
+   * \brief Utility function for derived classes to set the function object
+   * \param[in] function_object Callable used for filtering; the indices for which it
+   * returns false are filtered out
+ */
+ void
+  setFunctionObject(FunctionObjectT function_object) noexcept
+ {
+ functionObject_ = std::move(function_object);
+ }
};
+} // namespace advanced
+
+template <class PointT>
+using FilterFunction = std::function<bool(const PointCloud<PointT>&, index_t)>;
+
+template <class PointT>
+using FunctionFilter = advanced::FunctorFilter<PointT, FilterFunction<PointT>>;
} // namespace experimental
} // namespace pcl
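
A usage sketch for the convenience alias defined above, assuming the header is installed as <pcl/filters/experimental/functor_filter.h> (see the CMake change earlier); the cloud contents and the x > 0 condition are only illustrative:

    #include <pcl/filters/experimental/functor_filter.h>
    #include <pcl/memory.h>
    #include <pcl/point_cloud.h>
    #include <pcl/point_types.h>

    int main()
    {
      auto cloud = pcl::make_shared<pcl::PointCloud<pcl::PointXYZ>>();
      cloud->emplace_back(1.0f, 0.0f, 0.0f);   // kept
      cloud->emplace_back(-1.0f, 0.0f, 0.0f);  // filtered out

      // FunctionFilter type-erases the callable into a std::function; the
      // advanced::FunctorFilter variant stores the lambda type directly instead.
      pcl::experimental::FunctionFilter<pcl::PointXYZ> filter(
          [](const pcl::PointCloud<pcl::PointXYZ>& pc, pcl::index_t idx) {
            return pc[idx].x > 0.0f;
          });
      filter.setInputCloud(cloud);

      pcl::PointCloud<pcl::PointXYZ> filtered;
      filter.filter(filtered);  // filtered.size() == 1
    }
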
* \param[out] indices The resultant indices.
*/
void
- applyFilter (std::vector<int> &indices) override
+ applyFilter (Indices &indices) override
{
applyFilterIndices (indices);
}
* \param[out] indices The resultant indices.
*/
void
- applyFilterIndices (std::vector<int> &indices);
+ applyFilterIndices (Indices &indices);
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
* \param indices the resultant indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
};
}
#pragma once
#include <pcl/pcl_base.h>
-#include <pcl/common/io.h>
-#include <pcl/conversions.h>
-#include <pcl/filters/boost.h>
-#include <cfloat>
+#include <pcl/common/io.h> // for copyPointCloud
#include <pcl/PointIndices.h>
namespace pcl
template<typename PointT> void
removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
- std::vector<int> &index);
+ Indices &index);
/** \brief Removes points that have their normals invalid (i.e., equal to NaN)
* \param[in] cloud_in the input point cloud
template<typename PointT> void
removeNaNNormalsFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
- std::vector<int> &index);
+ Indices &index);
////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Filter represents the base filter class. All filters must inherit from this interface.
* separate list. Default: false.
*/
Filter (bool extract_removed_indices = false) :
- removed_indices_ (new std::vector<int>),
+ removed_indices_ (new Indices),
extract_removed_indices_ (extract_removed_indices)
{
}
* separate list. Default: false.
*/
Filter (bool extract_removed_indices = false) :
- removed_indices_ (new std::vector<int>),
+ removed_indices_ (new Indices),
extract_removed_indices_ (extract_removed_indices)
{
}
* \ingroup filters
*/
template<typename PointT> void
- removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in, std::vector<int> &index);
+ removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in, Indices &index);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief @b FilterIndices represents the base class for filters that are about binary point removal.
* <br>
- * All derived classes have to implement the \a filter (PointCloud &output) and the \a filter (std::vector<int> &indices) methods.
+ * All derived classes have to implement the \a filter (PointCloud &output) and the \a filter (Indices &indices) methods.
* Ideally they also make use of the \a negative_, \a keep_organized_ and \a extract_removed_indices_ systems.
 * The distinction between the \a negative_ and \a extract_removed_indices_ systems only makes sense if the class automatically
* filters non-finite entries in the filtering methods (recommended).
* \param[out] indices the resultant filtered point cloud indices
*/
void
- filter (std::vector<int> &indices)
+ filter (Indices &indices)
{
if (!initCompute ())
return;
/** \brief Abstract filter method for point cloud indices. */
virtual void
- applyFilter (std::vector<int> &indices) = 0;
+ applyFilter (Indices &indices) = 0;
/** \brief Abstract filter method for point cloud. */
void
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief @b FilterIndices represents the base class for filters that are about binary point removal.
* <br>
- * All derived classes have to implement the \a filter (PointCloud &output) and the \a filter (std::vector<int> &indices) methods.
+ * All derived classes have to implement the \a filter (PointCloud &output) and the \a filter (Indices &indices) methods.
* Ideally they also make use of the \a negative_, \a keep_organized_ and \a extract_removed_indices_ systems.
 * The distinction between the \a negative_ and \a extract_removed_indices_ systems only makes sense if the class automatically
* filters non-finite entries in the filtering methods (recommended).
* \param[out] indices the resultant filtered point cloud indices
*/
void
- filter (std::vector<int> &indices);
+ filter (Indices &indices);
/** \brief Set whether the regular conditions for points filtering should apply, or the inverted conditions.
* \param[in] negative false = normal filter behavior (default), true = inverted behavior.
/** \brief Abstract filter method for point cloud indices. */
virtual void
- applyFilter (std::vector<int> &indices) = 0;
+ applyFilter (Indices &indices) = 0;
/** \brief Abstract filter method for point cloud. */
void
#pragma once
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
+#include <pcl/pcl_config.h> // for PCL_NO_PRECOMPILE
#include <pcl/point_types.h>
#include <pcl/filters/filter_indices.h>
-#include <pcl/common/transforms.h>
-#include <pcl/common/eigen.h>
namespace pcl
{
* \param[out] indices the resultant point cloud indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
private:
#pragma once
-#include <pcl/filters/boost.h>
#include <pcl/filters/filter.h>
#include <pcl/filters/filter_indices.h>
* \param[out] indices The resultant indices.
*/
void
- applyFilter (std::vector<int> &indices) override
+ applyFilter (Indices &indices) override
{
applyFilterIndices (indices);
}
* \param[out] indices The resultant indices.
*/
void
- applyFilterIndices (std::vector<int> &indices);
+ applyFilterIndices (Indices &indices);
};
}
#include <pcl/common/io.h>
#include <pcl/filters/approximate_voxel_grid.h>
+#include <boost/mpl/size.hpp> // for size
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
}
Eigen::VectorXf scratch = Eigen::VectorXf::Zero (centroid_size);
- output.points.resize (input_->size ()); // size output for worst case
+ output.resize (input_->size ()); // size output for worst case
std::size_t op = 0; // output pointer
for (const auto& point: *input_)
{
if (hhe->count)
flush (output, op++, hhe, rgba_index, centroid_size);
}
- output.points.resize (op);
+ output.resize (op);
output.width = output.size ();
output.height = 1; // downsampling breaks the organized structure
output.is_dense = false; // we filter out invalid points
#define PCL_FILTERS_BILATERAL_IMPL_H_
#include <pcl/filters/bilateral.h>
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> double
pcl::BilateralFilter<PointT>::computePointWeight (const int pid,
- const std::vector<int> &indices,
+ const Indices &indices,
const std::vector<float> &distances)
{
double BF = 0, W = 0;
}
tree_->setInputCloud (input_);
- std::vector<int> k_indices;
+ Indices k_indices;
std::vector<float> k_distances;
// Copy the input data into the output
output = *input_;
// For all the indices given (equal to the entire cloud if none given)
- for (std::size_t i = 0; i < indices_->size (); ++i)
+ for (const auto& idx : (*indices_))
{
// Perform a radius search to find the nearest neighbors
- tree_->radiusSearch ((*indices_)[i], sigma_s_ * 2, k_indices, k_distances);
+ tree_->radiusSearch (idx, sigma_s_ * 2, k_indices, k_distances);
// Overwrite the intensity value with the computed average
- output[(*indices_)[i]].intensity = static_cast<float> (computePointWeight ((*indices_)[i], k_indices, k_distances));
+ output[idx].intensity = static_cast<float> (computePointWeight (idx, k_indices, k_distances));
}
}
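
A usage sketch for this filter, assuming the usual BilateralFilter setters (setHalfSize for sigma_s, setStdDev for sigma_r); the parameter values are placeholders and the point type must provide an intensity field:

    #include <pcl/filters/bilateral.h>
    #include <pcl/memory.h>
    #include <pcl/point_cloud.h>
    #include <pcl/point_types.h>

    // Smooth the intensity channel of a cloud with an edge-preserving bilateral filter.
    pcl::PointCloud<pcl::PointXYZI>::Ptr
    smoothIntensity(const pcl::PointCloud<pcl::PointXYZI>::ConstPtr& cloud)
    {
      pcl::BilateralFilter<pcl::PointXYZI> bf;
      bf.setInputCloud(cloud);
      bf.setHalfSize(0.05);  // sigma_s: the radius search above uses 2 * sigma_s
      bf.setStdDev(0.1);     // sigma_r: intensity standard deviation
      auto smoothed = pcl::make_shared<pcl::PointCloud<pcl::PointXYZI>>();
      bf.filter(*smoothed);
      return smoothed;
    }
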
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// /ToDo: write fast version using eigen map and single matrix vector multiplication, that uses advantages of eigens SSE operations.
template<typename PointT> void
-pcl::BoxClipper3D<PointT>::clipPointCloud3D (const pcl::PointCloud<PointT>& cloud_in, std::vector<int>& clipped, const std::vector<int>& indices) const
+pcl::BoxClipper3D<PointT>::clipPointCloud3D (const pcl::PointCloud<PointT>& cloud_in, Indices& clipped, const Indices& indices) const
{
clipped.clear ();
if (indices.empty ())
}
else
{
- for (const int &index : indices)
+ for (const auto &index : indices)
if (clipPoint3D (cloud_in[index]))
clipped.push_back (index);
}
output.width = this->input_->width;
output.is_dense = this->input_->is_dense;
}
- output.points.resize (input_->size ());
+ output.resize (input_->size ());
removed_indices_->resize (input_->size ());
int nr_removed_p = 0;
}
output.width = nr_p;
- output.points.resize (nr_p);
+ output.resize (nr_p);
}
else
{
- std::vector<int> indices = *Filter<PointT>::indices_;
+ Indices indices = *Filter<PointT>::indices_;
std::sort (indices.begin (), indices.end ()); //TODO: is this necessary or can we assume the indices to be sorted?
bool removed_p = false;
std::size_t ci = 0;
///////////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT, typename PointOutT> PointOutT
-pcl::filters::GaussianKernel<PointInT, PointOutT>::operator() (const std::vector<int>& indices,
+pcl::filters::GaussianKernel<PointInT, PointOutT>::operator() (const Indices& indices,
const std::vector<float>& distances)
{
using namespace pcl::common;
float total_weight = 0;
std::vector<float>::const_iterator dist_it = distances.begin ();
- for (std::vector<int>::const_iterator idx_it = indices.begin ();
+ for (Indices::const_iterator idx_it = indices.begin ();
idx_it != indices.end ();
++idx_it, ++dist_it)
{
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT, typename PointOutT> PointOutT
-pcl::filters::GaussianKernelRGB<PointInT, PointOutT>::operator() (const std::vector<int>& indices, const std::vector<float>& distances)
+pcl::filters::GaussianKernelRGB<PointInT, PointOutT>::operator() (const Indices& indices, const std::vector<float>& distances)
{
using namespace pcl::common;
PointOutT result;
float r = 0, g = 0, b = 0;
std::vector<float>::const_iterator dist_it = distances.begin ();
- for (std::vector<int>::const_iterator idx_it = indices.begin ();
+ for (Indices::const_iterator idx_it = indices.begin ();
idx_it != indices.end ();
++idx_it, ++dist_it)
{
// Do a fast check to see if the search parameters are well defined
if (search_radius_ <= 0.0)
{
- PCL_ERROR ("[pcl::filters::Convlution3D::initCompute] search radius (%f) must be > 0",
+ PCL_ERROR ("[pcl::filters::Convlution3D::initCompute] search radius (%f) must be > 0\n",
search_radius_);
return (false);
}
// Make sure the provided kernel implements the required interface
if (dynamic_cast<ConvolvingKernel<PointInT, PointOutT>* > (&kernel_) == 0)
{
- PCL_ERROR ("[pcl::filters::Convlution3D::initCompute] init failed");
+ PCL_ERROR ("[pcl::filters::Convlution3D::initCompute] init failed : ");
PCL_ERROR ("kernel_ must implement ConvolvingKernel interface\n!");
return (false);
}
output.width = surface_->width;
output.height = surface_->height;
output.is_dense = surface_->is_dense;
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
#pragma omp parallel for \
#ifndef PCL_FILTERS_IMPL_COVARIANCE_SAMPLING_H_
#define PCL_FILTERS_IMPL_COVARIANCE_SAMPLING_H_
-#include <pcl/common/eigen.h>
#include <pcl/filters/covariance_sampling.h>
#include <list>
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
///////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename PointNT> bool
///////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename PointNT> void
-pcl::CovarianceSampling<PointT, PointNT>::applyFilter (std::vector<int> &sampled_indices)
+pcl::CovarianceSampling<PointT, PointNT>::applyFilter (Indices &sampled_indices)
{
Eigen::Matrix<double, 6, 6> c_mat;
// Invokes initCompute()
}
// Remap the sampled_indices to the input_ cloud
- for (int &sampled_index : sampled_indices)
+ for (auto &sampled_index : sampled_indices)
sampled_index = (*indices_)[candidate_indices[sampled_index]];
}
template<typename PointT, typename PointNT> void
pcl::CovarianceSampling<PointT, PointNT>::applyFilter (Cloud &output)
{
- std::vector<int> sampled_indices;
+ Indices sampled_indices;
applyFilter (sampled_indices);
output.resize (sampled_indices.size ());
#define PCL_FILTERS_IMPL_CROP_BOX_H_
#include <pcl/filters/crop_box.h>
+#include <pcl/common/eigen.h> // for getTransformation
+#include <pcl/common/point_tests.h> // for isFinite
+#include <pcl/common/transforms.h> // for transformPoint
///////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
-pcl::CropBox<PointT>::applyFilter (std::vector<int> &indices)
+pcl::CropBox<PointT>::applyFilter (Indices &indices)
{
indices.resize (input_->size ());
removed_indices_->resize (input_->size ());
if (negative_)
indices[indices_count++] = index;
else if (extract_removed_indices_)
- (*removed_indices_)[removed_indices_count++] = static_cast<int> (index);
+ (*removed_indices_)[removed_indices_count++] = index;
}
// If inside the cropbox
else
{
if (negative_ && extract_removed_indices_)
- (*removed_indices_)[removed_indices_count++] = static_cast<int> (index);
+ (*removed_indices_)[removed_indices_count++] = index;
else if (!negative_)
indices[indices_count++] = index;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
-pcl::CropHull<PointT>::applyFilter (std::vector<int> &indices)
+pcl::CropHull<PointT>::applyFilter (Indices &indices)
{
if (dim_ == 2)
{
-std::numeric_limits<float>::max (),
-std::numeric_limits<float>::max ()
);
- for (std::size_t index = 0; index < indices_->size (); index++)
+ for (pcl::Vertices const & poly : hull_polygons_)
{
- Eigen::Vector3f pt = (*input_)[(*indices_)[index]].getVector3fMap ();
- for (int i = 0; i < 3; i++)
+ for (auto const & idx : poly.vertices)
{
- if (pt[i] < cloud_min[i]) cloud_min[i] = pt[i];
- if (pt[i] > cloud_max[i]) cloud_max[i] = pt[i];
+ Eigen::Vector3f pt = (*hull_cloud_)[idx].getVector3fMap ();
+ cloud_min = cloud_min.cwiseMin(pt);
+ cloud_max = cloud_max.cwiseMax(pt);
}
}
-
+
return (cloud_max - cloud_min);
}
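
For context, a usage sketch of CropHull; the hull is built with pcl::ConvexHull here, and the setter names (setHullCloud, setHullIndices, setDim) are the usual CropHull interface rather than anything introduced by this change:

    #include <pcl/filters/crop_hull.h>
    #include <pcl/memory.h>
    #include <pcl/point_types.h>
    #include <pcl/surface/convex_hull.h>

    #include <vector>

    // Keep only the points of `cloud` lying inside the convex hull of `hull_points`.
    pcl::PointCloud<pcl::PointXYZ>
    cropToHull(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
               const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& hull_points)
    {
      auto hull_cloud = pcl::make_shared<pcl::PointCloud<pcl::PointXYZ>>();
      std::vector<pcl::Vertices> hull_polygons;

      pcl::ConvexHull<pcl::PointXYZ> hull;
      hull.setInputCloud(hull_points);
      hull.reconstruct(*hull_cloud, hull_polygons);

      pcl::CropHull<pcl::PointXYZ> crop;
      crop.setHullCloud(hull_cloud);
      crop.setHullIndices(hull_polygons);
      crop.setDim(hull.getDimension());  // 2 -> applyFilter2D, 3 -> applyFilter3D
      crop.setInputCloud(cloud);

      pcl::PointCloud<pcl::PointXYZ> cropped;
      crop.filter(cropped);
      return cropped;
    }
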
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> template<unsigned PlaneDim1, unsigned PlaneDim2> void
-pcl::CropHull<PointT>::applyFilter2D (std::vector<int> &indices)
+pcl::CropHull<PointT>::applyFilter2D (Indices &indices)
{
// see comments in (PointCloud& output) overload
for (std::size_t index = 0; index < indices_->size (); index++)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
-pcl::CropHull<PointT>::applyFilter3D (std::vector<int> &indices)
+pcl::CropHull<PointT>::applyFilter3D (Indices &indices)
{
// see comments in applyFilter3D (PointCloud& output)
for (std::size_t index = 0; index < indices_->size (); index++)
#define PCL_FILTERS_IMPL_EXTRACT_INDICES_HPP_
#include <pcl/filters/extract_indices.h>
+#include <numeric> // for std::iota
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::ExtractIndices<PointT>::filterDirectly (PointCloudPtr &cloud)
{
- std::vector<int> indices;
+ Indices indices;
bool temp = extract_removed_indices_;
extract_removed_indices_ = true;
this->setInputCloud (cloud);
std::vector<pcl::PCLPointField> fields;
pcl::for_each_type<FieldList> (pcl::detail::FieldAdder<PointT> (fields));
- for (int rii = 0; rii < static_cast<int> (removed_indices_->size ()); ++rii) // rii = removed indices iterator
+ for (const auto& rii : (*removed_indices_)) // rii = removed indices iterator
{
- std::size_t pt_index = (std::size_t) (*removed_indices_)[rii];
+ uindex_t pt_index = (uindex_t) rii;
if (pt_index >= input_->size ())
{
PCL_ERROR ("[pcl::%s::filterDirectly] The index exceeds the size of the input. Do nothing.\n",
template <typename PointT> void
pcl::ExtractIndices<PointT>::applyFilter (PointCloud &output)
{
- std::vector<int> indices;
+ Indices indices;
if (keep_organized_)
{
bool temp = extract_removed_indices_;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::ExtractIndices<PointT>::applyFilterIndices (std::vector<int> &indices)
+pcl::ExtractIndices<PointT>::applyFilterIndices (Indices &indices)
{
if (indices_->size () > input_->size ())
{
if (extract_removed_indices_)
{
// Set up the full indices set
- std::vector<int> full_indices (input_->size ());
- for (int fii = 0; fii < static_cast<int> (full_indices.size ()); ++fii) // fii = full indices iterator
- full_indices[fii] = fii;
+ Indices full_indices (input_->size ());
+ std::iota (full_indices.begin (), full_indices.end (), static_cast<index_t> (0));
// Set up the sorted input indices
- std::vector<int> sorted_input_indices = *indices_;
+ Indices sorted_input_indices = *indices_;
std::sort (sorted_input_indices.begin (), sorted_input_indices.end ());
// Store the difference in removed_indices
else // Inverted functionality
{
// Set up the full indices set
- std::vector<int> full_indices (input_->size ());
- for (int fii = 0; fii < static_cast<int> (full_indices.size ()); ++fii) // fii = full indices iterator
- full_indices[fii] = fii;
+ Indices full_indices (input_->size ());
+ std::iota (full_indices.begin (), full_indices.end (), static_cast<index_t> (0));
// Set up the sorted input indices
- std::vector<int> sorted_input_indices = *indices_;
+ Indices sorted_input_indices = *indices_;
std::sort (sorted_input_indices.begin (), sorted_input_indices.end ());
// Store the difference in indices
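
Stepping back from the implementation, a usage sketch of ExtractIndices with the negative flag that drives this index bookkeeping; the function and variable names are illustrative:

    #include <pcl/filters/extract_indices.h>
    #include <pcl/point_types.h>

    // Split a cloud into the points listed in `indices` and the remaining ones.
    void
    splitCloud(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
               const pcl::IndicesPtr& indices,
               pcl::PointCloud<pcl::PointXYZ>& selected,
               pcl::PointCloud<pcl::PointXYZ>& rest)
    {
      pcl::ExtractIndices<pcl::PointXYZ> extract;
      extract.setInputCloud(cloud);
      extract.setIndices(indices);

      extract.setNegative(false);  // keep the listed indices
      extract.filter(selected);

      extract.setNegative(true);   // keep everything except the listed indices
      extract.filter(rest);
    }
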
#include <pcl/filters/fast_bilateral_omp.h>
#include <pcl/common/io.h>
-#include <cassert>
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
#pragma once
-#include <pcl/pcl_macros.h>
+#include <pcl/pcl_exports.h> // for PCL_EXPORTS
#include <pcl/common/point_tests.h> // for pcl::isFinite
#include <pcl/filters/filter.h>
template <typename PointT> void
pcl::removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
- std::vector<int> &index)
+ Indices &index)
{
// If the clouds are not the same, prepare the output
if (&cloud_in != &cloud_out)
{
cloud_out.header = cloud_in.header;
- cloud_out.points.resize (cloud_in.size ());
+ cloud_out.resize (cloud_in.size ());
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
}
if (j != cloud_in.size ())
{
// Resize to the correct size
- cloud_out.points.resize (j);
+ cloud_out.resize (j);
index.resize (j);
}
template <typename PointT> void
pcl::removeNaNNormalsFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
- std::vector<int> &index)
+ Indices &index)
{
// If the clouds are not the same, prepare the output
if (&cloud_in != &cloud_out)
{
cloud_out.header = cloud_in.header;
- cloud_out.points.resize (cloud_in.size ());
+ cloud_out.resize (cloud_in.size ());
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
}
if (j != cloud_in.size ())
{
// Resize to the correct size
- cloud_out.points.resize (j);
+ cloud_out.resize (j);
index.resize (j);
}
}
-#define PCL_INSTANTIATE_removeNaNFromPointCloud(T) template PCL_EXPORTS void pcl::removeNaNFromPointCloud<T>(const pcl::PointCloud<T>&, pcl::PointCloud<T>&, std::vector<int>&);
-#define PCL_INSTANTIATE_removeNaNNormalsFromPointCloud(T) template PCL_EXPORTS void pcl::removeNaNNormalsFromPointCloud<T>(const pcl::PointCloud<T>&, pcl::PointCloud<T>&, std::vector<int>&);
+#define PCL_INSTANTIATE_removeNaNFromPointCloud(T) template PCL_EXPORTS void pcl::removeNaNFromPointCloud<T>(const pcl::PointCloud<T>&, pcl::PointCloud<T>&, Indices&);
+#define PCL_INSTANTIATE_removeNaNNormalsFromPointCloud(T) template PCL_EXPORTS void pcl::removeNaNNormalsFromPointCloud<T>(const pcl::PointCloud<T>&, pcl::PointCloud<T>&, Indices&);
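
A usage sketch for removeNaNFromPointCloud; note that in-place filtering (passing the same cloud as input and output) is supported by the implementation above:

    #include <pcl/filters/filter.h>  // removeNaNFromPointCloud
    #include <pcl/point_types.h>

    // Drop non-finite points before running algorithms that need a dense cloud.
    pcl::Indices
    dropNaNs(pcl::PointCloud<pcl::PointXYZ>& cloud)
    {
      pcl::Indices kept;  // kept[i] is the position of cloud[i] in the original cloud
      pcl::removeNaNFromPointCloud(cloud, cloud, kept);
      return kept;
    }
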
template <typename PointT> void
pcl::removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- std::vector<int> &index)
+ Indices &index)
{
// Reserve enough space for the indices
index.resize (cloud_in.size ());
template<typename PointT> void
pcl::FilterIndices<PointT>::applyFilter (PointCloud &output)
{
- std::vector<int> indices;
+ Indices indices;
if (keep_organized_)
{
if (!extract_removed_indices_)
}
-#define PCL_INSTANTIATE_removeNanFromPointCloud(T) template PCL_EXPORTS void pcl::removeNaNFromPointCloud<T>(const pcl::PointCloud<T>&, std::vector<int>&);
+#define PCL_INSTANTIATE_removeNanFromPointCloud(T) template PCL_EXPORTS void pcl::removeNaNFromPointCloud<T>(const pcl::PointCloud<T>&, Indices&);
#define PCL_INSTANTIATE_FilterIndices(T) template class PCL_EXPORTS pcl::FilterIndices<T>;
#endif // PCL_FILTERS_IMPL_FILTER_INDICES_H_
///////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::FrustumCulling<PointT>::applyFilter (std::vector<int> &indices)
+pcl::FrustumCulling<PointT>::applyFilter (Indices &indices)
{
Eigen::Vector4f pl_n; // near plane
Eigen::Vector4f pl_f; // far plane
#include <pcl/common/common.h>
#include <pcl/common/io.h>
+#include <pcl/common/point_tests.h> // for isXYZFinite
#include <pcl/filters/grid_minimum.h>
struct point_index_idx
{
PCL_WARN ("[pcl::%s::applyFilter] No input dataset given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
- std::vector<int> indices;
+ Indices indices;
output.is_dense = true;
applyFilterIndices (indices);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::GridMinimum<PointT>::applyFilterIndices (std::vector<int> &indices)
+pcl::GridMinimum<PointT>::applyFilterIndices (Indices &indices)
{
indices.resize (indices_->size ());
int oii = 0;
if ((dx*dy) > static_cast<std::int64_t> (std::numeric_limits<std::int32_t>::max ()))
{
- PCL_WARN ("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.", getClassName ().c_str ());
+ PCL_WARN ("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.\n", getClassName ().c_str ());
return;
}
// First pass: go over all points and insert them into the index_vector vector
// with calculated idx. Points with the same idx value will contribute to the
// same point of resulting CloudPoint
- for (std::vector<int>::const_iterator it = indices_->begin (); it != indices_->end (); ++it)
+ for (const auto& index : (*indices_))
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite ((*input_)[*it].x) ||
- !std::isfinite ((*input_)[*it].y) ||
- !std::isfinite ((*input_)[*it].z))
+ if (!isXYZFinite ((*input_)[index]))
continue;
- int ijk0 = static_cast<int> (std::floor ((*input_)[*it].x * inverse_resolution_) - static_cast<float> (min_b[0]));
- int ijk1 = static_cast<int> (std::floor ((*input_)[*it].y * inverse_resolution_) - static_cast<float> (min_b[1]));
+ int ijk0 = static_cast<int> (std::floor ((*input_)[index].x * inverse_resolution_) - static_cast<float> (min_b[0]));
+ int ijk1 = static_cast<int> (std::floor ((*input_)[index].y * inverse_resolution_) - static_cast<float> (min_b[1]));
// Compute the grid cell index
int idx = ijk0 * divb_mul[0] + ijk1 * divb_mul[1];
- index_vector.emplace_back(static_cast<unsigned int> (idx), *it);
+ index_vector.emplace_back(static_cast<unsigned int> (idx), index);
}
// Second pass: sort the index_vector vector using value representing target cell as index
#include <pcl/filters/local_maximum.h>
#include <pcl/filters/project_inliers.h>
#include <pcl/ModelCoefficients.h>
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
{
PCL_WARN ("[pcl::%s::applyFilter] No input dataset given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
- std::vector<int> indices;
+ Indices indices;
output.is_dense = true;
applyFilterIndices (indices);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::LocalMaximum<PointT>::applyFilterIndices (std::vector<int> &indices)
+pcl::LocalMaximum<PointT>::applyFilterIndices (Indices &indices)
{
typename PointCloud::Ptr cloud_projected (new PointCloud);
// Find all points within xy radius (i.e., a vertical cylinder) of the query
// point, removing those that are locally maximal (i.e., highest z within the
// cylinder)
- for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii)
+ for (const auto& iii : (*indices_))
{
- if (!isFinite ((*input_)[(*indices_)[iii]]))
+ if (!isFinite ((*input_)[iii]))
{
continue;
}
// Points in the neighborhood of a previously identified local max, will
// not be maximal in their own neighborhood
- if (point_is_visited[(*indices_)[iii]] && !point_is_max[(*indices_)[iii]])
+ if (point_is_visited[iii] && !point_is_max[iii])
{
continue;
}
// Assume the current query point is the maximum, mark as visited
- point_is_max[(*indices_)[iii]] = true;
- point_is_visited[(*indices_)[iii]] = true;
+ point_is_max[iii] = true;
+ point_is_visited[iii] = true;
// Perform the radius search in the projected cloud
- std::vector<int> radius_indices;
+ Indices radius_indices;
std::vector<float> radius_dists;
- PointT p = (*cloud_projected)[(*indices_)[iii]];
+ PointT p = (*cloud_projected)[iii];
if (searcher_->radiusSearch (p, radius_, radius_indices, radius_dists) == 0)
{
PCL_WARN ("[pcl::%s::applyFilter] Searching for neighbors within radius %f failed.\n", getClassName ().c_str (), radius_);
// If query point is alone, we retain it regardless
if (radius_indices.size () == 1)
{
- point_is_max[(*indices_)[iii]] = false;
+ point_is_max[iii] = false;
}
// Check to see if a neighbor is higher than the query point
- float query_z = (*input_)[(*indices_)[iii]].z;
+ float query_z = (*input_)[iii].z;
for (std::size_t k = 1; k < radius_indices.size (); ++k) // k = 1 is the first neighbor
{
if ((*input_)[radius_indices[k]].z > query_z)
{
// Query point is not the local max, no need to check others
- point_is_max[(*indices_)[iii]] = false;
+ point_is_max[iii] = false;
break;
}
}
// If the query point was a local max, all neighbors can be marked as
// visited, excluding them from future consideration as local maxima
- if (point_is_max[(*indices_)[iii]])
+ if (point_is_max[iii])
{
for (std::size_t k = 1; k < radius_indices.size (); ++k) // k = 1 is the first neighbor
{
// Points that are local maxima are passed to removed indices
// Unless negative was set, then it's the opposite condition
- if ((!negative_ && point_is_max[(*indices_)[iii]]) || (negative_ && !point_is_max[(*indices_)[iii]]))
+ if ((!negative_ && point_is_max[iii]) || (negative_ && !point_is_max[iii]))
{
if (extract_removed_indices_)
{
- (*removed_indices_)[rii++] = (*indices_)[iii];
+ (*removed_indices_)[rii++] = iii;
}
continue;
}
// Otherwise it was a normal point for output (inlier)
- indices[oii++] = (*indices_)[iii];
+ indices[oii++] = iii;
}
// Resize the output arrays
continue;
// The output depth will be the median of all the depths in the window
- partial_sort (vals.begin (), vals.begin () + vals.size () / 2 + 1, vals.end ());
- float new_depth = vals[vals.size () / 2];
+ auto middle_it = vals.begin () + vals.size () / 2;
+ std::nth_element (vals.begin (), middle_it, vals.end ());
+ float new_depth = *middle_it;
// Do not allow points to move more than the set max_allowed_movement_
if (std::abs (new_depth - (*input_)(x, y).z) < max_allowed_movement_)
output (x, y).z = new_depth;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::ModelOutlierRemoval<PointT>::applyFilterIndices (std::vector<int> &indices)
+pcl::ModelOutlierRemoval<PointT>::applyFilterIndices (Indices &indices)
{
//The arrays to be used
indices.resize (indices_->size ());
//if the filter setup is invalid filter for nan and return;
if (!valid_setup)
{
- for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii) // iii = input indices iterator
+ for (const auto& iii : (*indices_)) // iii = input indices iterator
{
// Non-finite entries are always passed to removed indices
- if (!isFinite ((*input_)[ (*indices_)[iii]]))
+ if (!isFinite ((*input_)[iii]))
{
if (extract_removed_indices_)
- (*removed_indices_)[rii++] = (*indices_)[iii];
+ (*removed_indices_)[rii++] = iii;
continue;
}
- indices[oii++] = (*indices_)[iii];
+ indices[oii++] = iii;
}
return;
}
#define PCL_FILTERS_IMPL_MORPHOLOGICAL_FILTER_H_
#include <limits>
-#include <vector>
#include <Eigen/Core>
for (std::size_t p_idx = 0; p_idx < cloud_in->size (); ++p_idx)
{
Eigen::Vector3f bbox_min, bbox_max;
- std::vector<int> pt_indices;
+ Indices pt_indices;
float minx = (*cloud_in)[p_idx].x - half_res;
float miny = (*cloud_in)[p_idx].y - half_res;
float minz = -std::numeric_limits<float>::max ();
for (std::size_t p_idx = 0; p_idx < cloud_temp.size (); ++p_idx)
{
Eigen::Vector3f bbox_min, bbox_max;
- std::vector<int> pt_indices;
+ Indices pt_indices;
float minx = cloud_temp[p_idx].x - half_res;
float miny = cloud_temp[p_idx].y - half_res;
float minz = -std::numeric_limits<float>::max ();
for (std::size_t p_idx = 0; p_idx < cloud_temp.size (); ++p_idx)
{
Eigen::Vector3f bbox_min, bbox_max;
- std::vector<int> pt_indices;
+ Indices pt_indices;
float minx = cloud_temp[p_idx].x - half_res;
float miny = cloud_temp[p_idx].y - half_res;
float minz = -std::numeric_limits<float>::max ();
///////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename NormalT> void
-pcl::NormalSpaceSampling<PointT, NormalT>::applyFilter (std::vector<int> &indices)
+pcl::NormalSpaceSampling<PointT, NormalT>::applyFilter (Indices &indices)
{
if (!initCompute ())
{
// If we need to return the indices that we haven't sampled
if (extract_removed_indices_)
{
- std::vector<int> indices_temp = indices;
+ Indices indices_temp = indices;
std::sort (indices_temp.begin (), indices_temp.end ());
- std::vector<int> all_indices_temp = *indices_;
+ Indices all_indices_temp = *indices_;
std::sort (all_indices_temp.begin (), all_indices_temp.end ());
set_difference (all_indices_temp.begin (), all_indices_temp.end (),
indices_temp.begin (), indices_temp.end (),
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::PassThrough<PointT>::applyFilterIndices (std::vector<int> &indices)
+pcl::PassThrough<PointT>::applyFilterIndices (Indices &indices)
{
// The arrays to be used
indices.resize (indices_->size ());
// /ToDo: write fast version using eigen map and single matrix vector multiplication, that uses advantages of eigens SSE operations.
template<typename PointT> void
-pcl::PlaneClipper3D<PointT>::clipPointCloud3D (const pcl::PointCloud<PointT>& cloud_in, std::vector<int>& clipped, const std::vector<int>& indices) const
+pcl::PlaneClipper3D<PointT>::clipPointCloud3D (const pcl::PointCloud<PointT>& cloud_in, Indices& clipped, const Indices& indices) const
{
if (indices.empty ())
{
}
else
{
- for (std::vector<int>::const_iterator iIt = indices.begin (); iIt != indices.end (); ++iIt)
- if (clipPoint3D (cloud_in[*iIt]))
- clipped.push_back (*iIt);
+ for (const auto& index : indices)
+ if (clipPoint3D (cloud_in[index]))
+ clipped.push_back (index);
}
}
#endif //PCL_FILTERS_IMPL_PLANE_CLIPPER3D_HPP
#define PCL_FILTERS_IMPL_PROJECT_INLIERS_H_
#include <pcl/filters/project_inliers.h>
+#include <pcl/sample_consensus/sac_model_circle.h>
+#include <pcl/sample_consensus/sac_model_cylinder.h>
+#include <pcl/sample_consensus/sac_model_cone.h>
+#include <pcl/sample_consensus/sac_model_line.h>
+#include <pcl/sample_consensus/sac_model_normal_plane.h>
+#include <pcl/sample_consensus/sac_model_normal_sphere.h>
+#include <pcl/sample_consensus/sac_model_parallel_plane.h>
+#include <pcl/sample_consensus/sac_model_normal_parallel_plane.h>
+#include <pcl/sample_consensus/sac_model_parallel_line.h>
+#include <pcl/sample_consensus/sac_model_perpendicular_plane.h>
+#include <pcl/sample_consensus/sac_model_plane.h>
+#include <pcl/sample_consensus/sac_model_sphere.h>
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
{
PCL_WARN ("[pcl::%s::applyFilter] No indices given or empty indices!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
{
PCL_ERROR ("[pcl::%s::applyFilter] Error initializing the SAC model!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (copy_all_data_)
{
if (!input_->isOrganized ())
{
- PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should be at least 2!", getClassName ().c_str ());
+ PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should be at least 2!\n", getClassName ().c_str ());
return (false);
}
if (levels_ < 2)
{
- PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should be at least 2!", getClassName ().c_str ());
+ PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should be at least 2!\n", getClassName ().c_str ());
return (false);
}
if (levels_ > 4)
{
- PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should not exceed 4!", getClassName ().c_str ());
+ PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should not exceed 4!\n", getClassName ().c_str ());
return (false);
}
#define PCL_FILTERS_IMPL_RADIUS_OUTLIER_REMOVAL_H_
#include <pcl/filters/radius_outlier_removal.h>
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::RadiusOutlierRemoval<PointT>::applyFilterIndices (std::vector<int> &indices)
+pcl::RadiusOutlierRemoval<PointT>::applyFilterIndices (Indices &indices)
{
if (search_radius_ == 0.0)
{
searcher_->setInputCloud (input_);
// The arrays to be used
- std::vector<int> nn_indices (indices_->size ());
+ Indices nn_indices (indices_->size ());
std::vector<float> nn_dists (indices_->size ());
indices.resize (indices_->size ());
removed_indices_->resize (indices_->size ());
int mean_k = min_pts_radius_ + 1;
double nn_dists_max = search_radius_ * search_radius_;
- for (std::vector<int>::const_iterator it = indices_->begin (); it != indices_->end (); ++it)
+ for (const auto& index : (*indices_))
{
// Perform the nearest-k search
- int k = searcher_->nearestKSearch (*it, mean_k, nn_indices, nn_dists);
+ int k = searcher_->nearestKSearch (index, mean_k, nn_indices, nn_dists);
// Check the number of neighbors
// Note: nn_dists is sorted, so check the last item
if (!chk_neighbors)
{
if (extract_removed_indices_)
- (*removed_indices_)[rii++] = *it;
+ (*removed_indices_)[rii++] = index;
continue;
}
// Otherwise it was a normal point for output (inlier)
- indices[oii++] = *it;
+ indices[oii++] = index;
}
}
// NaN or Inf values could exist => use radius search
else
{
- for (std::vector<int>::const_iterator it = indices_->begin (); it != indices_->end (); ++it)
+ for (const auto& index : (*indices_))
{
// Perform the radius search
// Note: k includes the query point, so is always at least 1
- int k = searcher_->radiusSearch (*it, search_radius_, nn_indices, nn_dists);
+ int k = searcher_->radiusSearch (index, search_radius_, nn_indices, nn_dists);
// Points having too few neighbors are outliers and are passed to removed indices
// Unless negative was set, then it's the opposite condition
if ((!negative_ && k <= min_pts_radius_) || (negative_ && k > min_pts_radius_))
{
if (extract_removed_indices_)
- (*removed_indices_)[rii++] = *it;
+ (*removed_indices_)[rii++] = index;
continue;
}
// Otherwise it was a normal point for output (inlier)
- indices[oii++] = *it;
+ indices[oii++] = index;
}
}
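
A usage sketch for RadiusOutlierRemoval, assuming the usual setters (setRadiusSearch, setMinNeighborsInRadius); the numeric values are placeholders:

    #include <pcl/filters/radius_outlier_removal.h>
    #include <pcl/point_types.h>

    // Remove points that have too few neighbors within the given radius.
    void
    removeIsolatedPoints(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
                         pcl::PointCloud<pcl::PointXYZ>& cleaned)
    {
      pcl::RadiusOutlierRemoval<pcl::PointXYZ> ror;
      ror.setInputCloud(cloud);
      ror.setRadiusSearch(0.05);       // radius in the cloud's units
      ror.setMinNeighborsInRadius(5);  // points with fewer neighbors are dropped
      ror.filter(cleaned);
    }
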
#define PCL_FILTERS_IMPL_RANDOM_SAMPLE_H_
#include <pcl/filters/random_sample.h>
-#include <pcl/type_traits.h>
///////////////////////////////////////////////////////////////////////////////
template<typename PointT>
void
-pcl::RandomSample<PointT>::applyFilter (std::vector<int> &indices)
+pcl::RandomSample<PointT>::applyFilter (Indices &indices)
{
std::size_t N = indices_->size ();
std::size_t sample_size = negative_ ? N - sample_ : sample_;
template<typename PointT> void
pcl::SamplingSurfaceNormal<PointT>::applyFilter (PointCloud &output)
{
- std::vector <int> indices;
+ Indices indices;
std::size_t npts = input_->size ();
for (std::size_t i = 0; i < npts; i++)
indices.push_back (i);
pcl::SamplingSurfaceNormal<PointT>::partition (
const PointCloud& cloud, const int first, const int last,
const Vector min_values, const Vector max_values,
- std::vector<int>& indices, PointCloud& output)
+ Indices& indices, PointCloud& output)
{
const int count (last - first);
if (count <= static_cast<int> (sample_))
template<typename PointT> void
pcl::SamplingSurfaceNormal<PointT>::samplePartition (
const PointCloud& data, const int first, const int last,
- std::vector <int>& indices, PointCloud& output)
+ Indices& indices, PointCloud& output)
{
pcl::PointCloud <PointT> cloud;
pcl::ShadowPoints<PointT, NormalT>::applyFilter (PointCloud &output)
{
assert (input_normals_ != NULL);
- output.points.resize (input_->size ());
+ output.resize (input_->size ());
if (extract_removed_indices_)
removed_indices_->resize (input_->size ());
}
}
- output.points.resize (cp);
+ output.resize (cp);
removed_indices_->resize (ri);
output.height = 1;
output.width = output.size ();
///////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename NormalT> void
-pcl::ShadowPoints<PointT, NormalT>::applyFilter (std::vector<int> &indices)
+pcl::ShadowPoints<PointT, NormalT>::applyFilter (Indices &indices)
{
assert (input_normals_ != NULL);
indices.resize (input_->size ());
unsigned int k = 0;
unsigned int z = 0;
- for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
+ for (const auto& idx : (*indices_))
{
- const NormalT &normal = (*input_normals_)[*idx];
- const PointT &pt = (*input_)[*idx];
+ const NormalT &normal = (*input_normals_)[idx];
+ const PointT &pt = (*input_)[idx];
float val = std::abs (normal.normal_x * pt.x + normal.normal_y * pt.y + normal.normal_z * pt.z);
if ( (val >= threshold_) ^ negative_)
- indices[k++] = *idx;
+ indices[k++] = idx;
else if (extract_removed_indices_)
- (*removed_indices_)[z++] = *idx;
+ (*removed_indices_)[z++] = idx;
}
indices.resize (k);
removed_indices_->resize (z);
#define PCL_FILTERS_IMPL_STATISTICAL_OUTLIER_REMOVAL_H_
#include <pcl/filters/statistical_outlier_removal.h>
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::StatisticalOutlierRemoval<PointT>::applyFilterIndices (std::vector<int> &indices)
+pcl::StatisticalOutlierRemoval<PointT>::applyFilterIndices (Indices &indices)
{
// Initialize the search class
if (!searcher_)
searcher_->setInputCloud (input_);
// The arrays to be used
- std::vector<int> nn_indices (mean_k_);
+ Indices nn_indices (mean_k_);
std::vector<float> nn_dists (mean_k_);
std::vector<float> distances (indices_->size ());
indices.resize (indices_->size ());
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::getMinMax3D (const typename pcl::PointCloud<PointT>::ConstPtr &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const std::string &distance_field_name, float min_distance, float max_distance,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative)
{
// If dense, no need to check for NaNs
if (cloud->is_dense)
{
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// Get the distance value
const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*cloud)[index]);
}
else
{
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// Get the distance value
const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*cloud)[index]);
{
PCL_WARN ("[pcl::%s::applyFilter] No input dataset given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if ((dx*dy*dz) > static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()))
{
- PCL_WARN("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.", getClassName().c_str());
+ PCL_WARN("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.\n", getClassName().c_str());
output = *input_;
return;
}
// First pass: go over all points and insert them into the index_vector vector
// with calculated idx. Points with the same idx value will contribute to the
// same point of resulting CloudPoint
- for (std::vector<int>::const_iterator it = indices_->begin (); it != indices_->end (); ++it)
+ for (const auto& index : (*indices_))
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite ((*input_)[*it].x) ||
- !std::isfinite ((*input_)[*it].y) ||
- !std::isfinite ((*input_)[*it].z))
+ if (!isXYZFinite ((*input_)[index]))
continue;
// Get the distance value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*input_)[*it]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*input_)[index]);
float distance_value = 0;
memcpy (&distance_value, pt_data + fields[distance_idx].offset, sizeof (float));
continue;
}
- int ijk0 = static_cast<int> (std::floor ((*input_)[*it].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
- int ijk1 = static_cast<int> (std::floor ((*input_)[*it].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
- int ijk2 = static_cast<int> (std::floor ((*input_)[*it].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
+ int ijk0 = static_cast<int> (std::floor ((*input_)[index].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
+ int ijk1 = static_cast<int> (std::floor ((*input_)[index].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
+ int ijk2 = static_cast<int> (std::floor ((*input_)[index].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
// Compute the centroid leaf index
int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
- index_vector.emplace_back(static_cast<unsigned int> (idx), *it);
+ index_vector.emplace_back(static_cast<unsigned int> (idx), index);
}
}
// No distance filtering, process all data
// First pass: go over all points and insert them into the index_vector vector
// with calculated idx. Points with the same idx value will contribute to the
// same point of resulting CloudPoint
- for (std::vector<int>::const_iterator it = indices_->begin (); it != indices_->end (); ++it)
+ for (const auto& index : (*indices_))
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite ((*input_)[*it].x) ||
- !std::isfinite ((*input_)[*it].y) ||
- !std::isfinite ((*input_)[*it].z))
+ if (!isXYZFinite ((*input_)[index]))
continue;
- int ijk0 = static_cast<int> (std::floor ((*input_)[*it].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
- int ijk1 = static_cast<int> (std::floor ((*input_)[*it].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
- int ijk2 = static_cast<int> (std::floor ((*input_)[*it].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
+ int ijk0 = static_cast<int> (std::floor ((*input_)[index].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
+ int ijk1 = static_cast<int> (std::floor ((*input_)[index].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
+ int ijk2 = static_cast<int> (std::floor ((*input_)[index].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
// Compute the centroid leaf index
int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
- index_vector.emplace_back(static_cast<unsigned int> (idx), *it);
+ index_vector.emplace_back(static_cast<unsigned int> (idx), index);
}
}
}
// Fourth pass: compute centroids, insert them into their final position
- output.points.resize (total);
+ output.resize (total);
if (save_leaf_layout_)
{
try
#define PCL_VOXEL_GRID_COVARIANCE_IMPL_H_
#include <pcl/common/common.h>
-#include <pcl/filters/boost.h>
+#include <pcl/common/point_tests.h> // for isXYZFinite
#include <pcl/filters/voxel_grid_covariance.h>
-#include <Eigen/Dense>
#include <Eigen/Cholesky>
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
+#include <boost/mpl/size.hpp> // for size
+#include <boost/random/mersenne_twister.hpp> // for mt19937
+#include <boost/random/normal_distribution.hpp> // for normal_distribution
+#include <boost/random/variate_generator.hpp> // for variate_generator
//////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
{
PCL_WARN ("[pcl::%s::applyFilter] No input dataset given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
// Copy the header (and thus the frame_id) + allocate enough space for points
output.height = 1; // downsampling breaks the organized structure
output.is_dense = true; // we filter out invalid points
- output.points.clear ();
+ output.clear ();
Eigen::Vector4f min_p, max_p;
// Get the minimum and maximum dimensions
if((dx*dy*dz) > std::numeric_limits<std::int32_t>::max())
{
- PCL_WARN("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.", getClassName().c_str());
+ PCL_WARN("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.\n", getClassName().c_str());
output.clear();
return;
}
}
// Second pass: go over all leaves and compute centroids and covariance matrices
- output.points.reserve (leaves_.size ());
+ output.reserve (leaves_.size ());
if (searchable_)
voxel_centroids_leaf_indices_.reserve (leaves_.size ());
int cp = 0;
// Do we need to process all the fields?
if (!downsample_all_data_)
{
- output.points.back ().x = leaf.centroid[0];
- output.points.back ().y = leaf.centroid[1];
- output.points.back ().z = leaf.centroid[2];
+ output.back ().x = leaf.centroid[0];
+ output.back ().y = leaf.centroid[1];
+ output.back ().z = leaf.centroid[2];
}
else
{
// ---[ RGB special case
if (rgba_index >= 0)
{
- pcl::RGB& rgb = *reinterpret_cast<RGB*> (reinterpret_cast<char*> (&output.points.back ()) + rgba_index);
+ pcl::RGB& rgb = *reinterpret_cast<RGB*> (reinterpret_cast<char*> (&output.back ()) + rgba_index);
rgb.a = leaf.centroid[centroid_size - 4];
rgb.r = leaf.centroid[centroid_size - 3];
rgb.g = leaf.centroid[centroid_size - 2];
voxel_centroids_leaf_indices_.push_back (static_cast<int> (it->first));
// Single pass covariance calculation
- leaf.cov_ = (leaf.cov_ - 2 * (pt_sum * leaf.mean_.transpose ())) / leaf.nr_points + leaf.mean_ * leaf.mean_.transpose ();
- leaf.cov_ *= (leaf.nr_points - 1.0) / leaf.nr_points;
+ leaf.cov_ = (leaf.cov_ - pt_sum * leaf.mean_.transpose()) / (leaf.nr_points - 1.0);
 //Normalize eigenvalues such that the max is no more than 100x the min.
eigensolver.compute (leaf.cov_);
int pnt_per_cell = 1000;
boost::mt19937 rng;
- boost::normal_distribution<> nd (0.0, leaf_size_.head (3).norm ());
+ boost::normal_distribution<> nd (0.0, 1.0);
boost::variate_generator<boost::mt19937&, boost::normal_distribution<> > var_nor (rng, nd);
Eigen::LLT<Eigen::Matrix3d> llt_of_cov;
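
// The single-pass update above relies on the identity
//   sum_i (x_i - mu)(x_i - mu)^T = sum_i x_i x_i^T - (sum_i x_i) mu^T,
// divided by (n - 1) for the unbiased sample covariance. A minimal self-contained
// sketch (illustrative only; it assumes the same accumulation scheme as leaf.cov_
// and pt_sum, and the names below are hypothetical, not PCL API):
#include <Eigen/Core>
#include <vector>

inline Eigen::Matrix3d
sampleCovarianceSinglePass (const std::vector<Eigen::Vector3d>& pts)
{
  Eigen::Vector3d sum = Eigen::Vector3d::Zero ();
  Eigen::Matrix3d sum_outer = Eigen::Matrix3d::Zero ();
  for (const auto& p : pts)            // one pass: accumulate sum and sum of outer products
  {
    sum += p;
    sum_outer += p * p.transpose ();
  }
  const double n = static_cast<double> (pts.size ());
  const Eigen::Vector3d mean = sum / n;
  return (sum_outer - sum * mean.transpose ()) / (n - 1.0);  // same form as the leaf.cov_ update above
}
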
#ifndef PCL_FILTERS_IMPL_VOXEL_GRID_OCCLUSION_ESTIMATION_H_
#define PCL_FILTERS_IMPL_VOXEL_GRID_OCCLUSION_ESTIMATION_H_
-#include <pcl/common/common.h>
#include <pcl/filters/voxel_grid_occlusion_estimation.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
#include <pcl/filters/filter_indices.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search
namespace pcl
{
* \param[out] indices The resultant indices.
*/
void
- applyFilter (std::vector<int> &indices) override
+ applyFilter (Indices &indices) override
{
applyFilterIndices (indices);
}
* \param[out] indices The resultant indices.
*/
void
- applyFilterIndices (std::vector<int> &indices);
+ applyFilterIndices (Indices &indices);
private:
/** \brief A pointer to the spatial search object. */
* \param[out] indices The resultant indices.
*/
void
- applyFilter (std::vector<int> &indices) override
+ applyFilter (Indices &indices) override
{
applyFilterIndices (indices);
}
* \param[out] indices The resultant indices.
*/
void
- applyFilterIndices (std::vector<int> &indices);
+ applyFilterIndices (Indices &indices);
protected:
double normals_distance_weight_;
#pragma once
-#include <string>
-#include <pcl/pcl_base.h>
-#include <pcl/PointIndices.h>
-#include <pcl/conversions.h>
-#include <locale>
+#include <pcl/point_cloud.h> // for PointCloud
+#include <pcl/pcl_exports.h> // for PCL_EXPORTS
namespace pcl
{
*/
template <typename NormalT> inline std::vector<float>
assignNormalWeights (const PointCloud<NormalT>& cloud,
- int index,
- const std::vector<int>& k_indices,
+ index_t index,
+ const Indices& k_indices,
const std::vector<float>& k_sqr_distances)
{
pcl::utils::ignore(cloud, index);
template <typename NormalT> inline bool
refineNormal (const PointCloud<NormalT>& cloud,
int index,
- const std::vector<int>& k_indices,
+ const Indices& k_indices,
const std::vector<float>& k_sqr_distances,
NormalT& point)
{
*
* // Search parameters
* const int k = 5;
- * std::vector<std::vector<int> > k_indices;
+ * std::vector<Indices> k_indices;
* std::vector<std::vector<float> > k_sqr_distances;
*
* // Run search
* pcl::search::KdTree<pcl::PointXYZRGB> search;
* search.setInputCloud (cloud.makeShared ());
- * search.nearestKSearch (cloud, std::vector<int> (), k, k_indices, k_sqr_distances);
+ * search.nearestKSearch (cloud, Indices (), k, k_indices, k_sqr_distances);
*
* // Use search results for normal estimation
* pcl::NormalEstimation<PointT, NormalT> ne;
* @param k_indices indices of neighboring points
* @param k_sqr_distances squared distances to the neighboring points
*/
- NormalRefinement (const std::vector< std::vector<int> >& k_indices, const std::vector< std::vector<float> >& k_sqr_distances) :
+ NormalRefinement (const std::vector< Indices >& k_indices, const std::vector< std::vector<float> >& k_sqr_distances) :
Filter<NormalT>::Filter ()
{
filter_name_ = "NormalRefinement";
* @param k_sqr_distances squared distances to the neighboring points
*/
inline void
- setCorrespondences (const std::vector< std::vector<int> >& k_indices, const std::vector< std::vector<float> >& k_sqr_distances)
+ setCorrespondences (const std::vector< Indices >& k_indices, const std::vector< std::vector<float> >& k_sqr_distances)
{
k_indices_ = k_indices;
k_sqr_distances_ = k_sqr_distances;
* @param k_sqr_distances squared distances to the neighboring points
*/
inline void
- getCorrespondences (std::vector< std::vector<int> >& k_indices, std::vector< std::vector<float> >& k_sqr_distances)
+ getCorrespondences (std::vector< Indices >& k_indices, std::vector< std::vector<float> >& k_sqr_distances)
{
k_indices.assign (k_indices_.begin (), k_indices_.end ());
k_sqr_distances.assign (k_sqr_distances_.begin (), k_sqr_distances_.end ());
private:
/** \brief indices of neighboring points */
- std::vector< std::vector<int> > k_indices_;
+ std::vector< Indices > k_indices_;
/** \brief squared distances to the neighboring points */
std::vector< std::vector<float> > k_sqr_distances_;
#pragma once
-#include <pcl/filters/boost.h>
#include <pcl/filters/filter_indices.h>
-
+#include <boost/dynamic_bitset.hpp> // for dynamic_bitset
#include <ctime>
-#include <climits>
#include <random> // std::mt19937
namespace pcl
* \param[out] indices the resultant point cloud indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
bool
initCompute ();
#pragma once
+#include <cfloat> // for FLT_MIN, FLT_MAX
#include <pcl/pcl_macros.h>
#include <pcl/filters/filter_indices.h>
* \param[out] indices The resultant indices.
*/
void
- applyFilter (std::vector<int> &indices) override
+ applyFilter (Indices &indices) override
{
applyFilterIndices (indices);
}
* \param[out] indices The resultant indices.
*/
void
- applyFilterIndices (std::vector<int> &indices);
+ applyFilterIndices (Indices &indices);
private:
/** \brief The name of the field that will be used for filtering. */
limit_max = filter_limit_max_;
}
- /** \brief Set to true if we want to return the data outside the interval specified by setFilterLimits (min, max).
- * Default: false.
- * \param[in] limit_negative return data inside the interval (false) or outside (true)
- */
- PCL_DEPRECATED(1, 12, "use inherited FilterIndices::setNegative() instead")
- inline void
- setFilterLimitsNegative (const bool limit_negative)
- {
- negative_ = limit_negative;
- }
-
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
- * \param[out] limit_negative true if data \b outside the interval [min; max] is to be returned, false otherwise
- */
- PCL_DEPRECATED(1, 12, "use inherited FilterIndices::getNegative() instead")
- inline void
- getFilterLimitsNegative (bool &limit_negative) const
- {
- limit_negative = negative_;
- }
-
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
- * \return true if data \b outside the interval [min; max] is to be returned, false otherwise
- */
- PCL_DEPRECATED(1, 12, "use inherited FilterIndices::getNegative() instead")
- inline bool
- getFilterLimitsNegative () const
- {
- return (negative_);
- }
-
protected:
void
applyFilter (PCLPointCloud2 &output) override;
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
private:
/** \brief The desired user filter field name. */
clipPlanarPolygon3D (const std::vector<PointT, Eigen::aligned_allocator<PointT> >& polygon, std::vector<PointT, Eigen::aligned_allocator<PointT> >& clipped_polygon) const;
virtual void
- clipPointCloud3D (const pcl::PointCloud<PointT> &cloud_in, std::vector<int>& clipped, const std::vector<int>& indices = std::vector<int> ()) const;
+ clipPointCloud3D (const pcl::PointCloud<PointT> &cloud_in, Indices& clipped, const Indices& indices = Indices ()) const;
virtual Clipper3D<PointT>*
clone () const;
#include <pcl/filters/filter.h>
#include <pcl/ModelCoefficients.h>
// Sample Consensus models
-#include <pcl/sample_consensus/model_types.h>
#include <pcl/sample_consensus/sac_model.h>
-#include <pcl/sample_consensus/sac_model_circle.h>
-#include <pcl/sample_consensus/sac_model_cylinder.h>
-#include <pcl/sample_consensus/sac_model_cone.h>
-#include <pcl/sample_consensus/sac_model_line.h>
-#include <pcl/sample_consensus/sac_model_normal_plane.h>
-#include <pcl/sample_consensus/sac_model_normal_sphere.h>
-#include <pcl/sample_consensus/sac_model_parallel_plane.h>
-#include <pcl/sample_consensus/sac_model_normal_parallel_plane.h>
-#include <pcl/sample_consensus/sac_model_parallel_line.h>
-#include <pcl/sample_consensus/sac_model_perpendicular_plane.h>
-#include <pcl/sample_consensus/sac_model_plane.h>
-#include <pcl/sample_consensus/sac_model_sphere.h>
namespace pcl
{
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief @b ProjectInliers uses a model and a set of inlier indices from a PointCloud to project them into a
* separate PointCloud.
- * \note setFilterFieldName (), setFilterLimits (), and setFilterLimitNegative () are ignored.
* \author Radu Bogdan Rusu
* \ingroup filters
*/
#pragma once
#include <pcl/filters/filter_indices.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search, Search<>::Ptr
namespace pcl
{
* \param[out] indices The resultant indices.
*/
void
- applyFilter (std::vector<int> &indices) override
+ applyFilter (Indices &indices) override
{
applyFilterIndices (indices);
}
* \param[out] indices The resultant indices.
*/
void
- applyFilterIndices (std::vector<int> &indices);
+ applyFilterIndices (Indices &indices);
private:
/** \brief A pointer to the spatial search object. */
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief @b RadiusOutlierRemoval is a simple filter that removes outliers if the number of neighbors in a certain
* search radius is smaller than a given K.
- * \note setFilterFieldName (), setFilterLimits (), and setFilterLimitNegative () are ignored.
* \author Radu Bogdan Rusu
* \ingroup filters
*/
applyFilter (PCLPointCloud2 &output) override;
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
};
}
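
// A minimal usage sketch of the filter described above (standard PCL filter
// workflow; the `cloud` / `filtered` variables are hypothetical):
#include <pcl/filters/radius_outlier_removal.h>
#include <pcl/point_types.h>

inline void
removeSparseOutliers (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
                      pcl::PointCloud<pcl::PointXYZ>& filtered)
{
  pcl::RadiusOutlierRemoval<pcl::PointXYZ> ror;
  ror.setInputCloud (cloud);
  ror.setRadiusSearch (0.05);          // neighbourhood radius, in the cloud's units
  ror.setMinNeighborsInRadius (5);     // the "K" from the description above
  ror.filter (filtered);               // keeps only points with at least 5 neighbours
}
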
* \param indices the resultant point cloud indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
/** \brief Return a random number fast using a LCG (Linear Congruential Generator) algorithm.
* See http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/ for more information.
* \param indices the resultant point cloud indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
/** \brief Return a random number fast using a LCG (Linear Congruential Generator) algorithm.
* See http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/ for more information.
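
// The comment above refers to a linear congruential generator of the form
// seed = a * seed + c (mod 2^32). A generic sketch of such a generator follows;
// the constants are the widely used "fast rand" pair and may differ from the
// ones actually used in this class:
#include <cstdint>

struct FastLCG
{
  std::uint32_t seed = 1;

  // Returns a pseudo-random value in [0, 32767].
  inline int
  next ()
  {
    seed = 214013u * seed + 2531011u;           // wraps modulo 2^32 via unsigned overflow
    return static_cast<int> ((seed >> 16) & 0x7FFF);
  }
};
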
#include <pcl/filters/filter.h>
#include <ctime>
-#include <climits>
namespace pcl
{
void
partition (const PointCloud& cloud, const int first, const int last,
const Vector min_values, const Vector max_values,
- std::vector<int>& indices, PointCloud& outcloud);
+ Indices& indices, PointCloud& outcloud);
/** \brief Randomly sample the points in each grid.
* \param[in] data
*/
void
samplePartition (const PointCloud& data, const int first, const int last,
- std::vector<int>& indices, PointCloud& outcloud);
+ Indices& indices, PointCloud& outcloud);
/** \brief Returns the threshold for splitting in a given dimension.
* \param[in] cloud the input cloud
#pragma once
#include <pcl/filters/filter_indices.h>
-#include <ctime>
-#include <climits>
namespace pcl
{
* \param[out] indices the resultant point cloud indices
*/
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
private:
#pragma once
#include <pcl/filters/filter_indices.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search
namespace pcl
{
* \param[out] indices The resultant indices.
*/
void
- applyFilter (std::vector<int> &indices) override
+ applyFilter (Indices &indices) override
{
applyFilterIndices (indices);
}
* \param[out] indices The resultant indices.
*/
void
- applyFilterIndices (std::vector<int> &indices);
+ applyFilterIndices (Indices &indices);
private:
/** \brief A pointer to the spatial search object. */
* Towards 3D Point Cloud Based Object Maps for Household Environments
* Robotics and Autonomous Systems Journal (Special Issue on Semantic Knowledge), 2008.
*
- * \note setFilterFieldName (), setFilterLimits (), and setFilterLimitNegative () are ignored.
* \author Radu Bogdan Rusu
* \ingroup filters
*/
KdTreePtr tree_;
void
- applyFilter (std::vector<int> &indices) override;
+ applyFilter (Indices &indices) override;
void
applyFilter (PCLPointCloud2 &output) override;
* The @b UniformSampling class creates a *3D voxel grid* (think about a voxel
* grid as a set of tiny 3D boxes in space) over the input point cloud data.
* Then, in each *voxel* (i.e., 3D box), all the points present will be
- * approximated (i.e., *downsampled*) with their centroid. This approach is
- * a bit slower than approximating them with the center of the voxel, but it
- * represents the underlying surface more accurately.
+ * approximated (i.e., *downsampled*) with the closest point to the center of the voxel.
*
* \author Radu Bogdan Rusu
* \ingroup filters
- */
+ */
template <typename PointT>
class UniformSampling: public Filter<PointT>
{
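
// A minimal usage sketch for the class described above (assuming the usual
// setRadiusSearch leaf-size setter; `cloud` / `downsampled` are hypothetical):
#include <pcl/filters/uniform_sampling.h>
#include <pcl/point_types.h>

inline void
downsampleUniformly (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
                     pcl::PointCloud<pcl::PointXYZ>& downsampled)
{
  pcl::UniformSampling<pcl::PointXYZ> us;
  us.setInputCloud (cloud);
  us.setRadiusSearch (0.05);   // edge length of each voxel (same units as the cloud)
  us.filter (downsampled);     // one representative point per occupied voxel
}
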
#pragma once
-#include <pcl/filters/boost.h>
#include <pcl/filters/filter.h>
-#include <map>
+#include <cfloat> // for FLT_MAX
namespace pcl
{
*/
template <typename PointT> void
getMinMax3D (const typename pcl::PointCloud<PointT>::ConstPtr &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const std::string &distance_field_name, float min_distance, float max_distance,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative = false);
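
// A minimal usage sketch of the overload declared above: compute the bounding box
// of the indexed points whose "z" field lies in [0.0, 1.0]. The include below is
// an assumption about where this overload lives; variable names are hypothetical.
#include <pcl/filters/voxel_grid.h>
#include <pcl/point_types.h>

inline void
boundingBoxInZBand (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
                    const pcl::Indices& indices,
                    Eigen::Vector4f& min_pt, Eigen::Vector4f& max_pt)
{
  // limit_negative is left at its default (false): keep points inside [0, 1]
  pcl::getMinMax3D<pcl::PointXYZ> (cloud, indices, "z", 0.0f, 1.0f, min_pt, max_pt);
}
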
#pragma once
-#include <pcl/filters/boost.h>
#include <pcl/filters/voxel_grid.h>
#include <map>
#include <pcl/point_types.h>
struct Leaf
{
/** \brief Constructor.
- * Sets \ref nr_points, \ref icov_, \ref mean_ and \ref evals_ to 0 and \ref cov_ and \ref evecs_ to the identity matrix
+ * Sets \ref nr_points, \ref cov_, \ref icov_, \ref mean_ and \ref evals_ to 0 and \ref evecs_ to the identity matrix
*/
Leaf () :
nr_points (0),
mean_ (Eigen::Vector3d::Zero ()),
- cov_ (Eigen::Matrix3d::Identity ()),
+ cov_ (Eigen::Matrix3d::Zero ()),
icov_ (Eigen::Matrix3d::Zero ()),
evecs_ (Eigen::Matrix3d::Identity ()),
evals_ (Eigen::Vector3d::Zero ())
}
// Find k-nearest neighbors in the occupied voxel centroid cloud
- std::vector<int> k_indices;
+ Indices k_indices;
k = kdtree_.nearestKSearch (point, k, k_indices, k_sqr_distances);
// Find leaves corresponding to neighbors
k_leaves.reserve (k);
- for (const int &k_index : k_indices)
+ for (const auto &k_index : k_indices)
{
auto voxel = leaves_.find(voxel_centroids_leaf_indices_[k_index]);
if (voxel == leaves_.end()) {
}
// Find neighbors within radius in the occupied voxel centroid cloud
- std::vector<int> k_indices;
+ Indices k_indices;
const int k = kdtree_.radiusSearch (point, radius, k_indices, k_sqr_distances, max_nn);
// Find leaves corresponding to neighbors
k_leaves.reserve (k);
- for (const int &k_index : k_indices)
+ for (const auto &k_index : k_indices)
{
const auto voxel = leaves_.find(voxel_centroids_leaf_indices_[k_index]);
if(voxel == leaves_.end()) {
void
pcl::CropBox<pcl::PCLPointCloud2>::applyFilter (PCLPointCloud2 &output)
{
- std::vector<int> indices;
+ Indices indices;
if (keep_organized_)
{
bool temp = extract_removed_indices_;
///////////////////////////////////////////////////////////////////////////////
void
-pcl::CropBox<pcl::PCLPointCloud2>::applyFilter (std::vector<int> &indices)
+pcl::CropBox<pcl::PCLPointCloud2>::applyFilter (Indices &indices)
{
indices.resize (input_->width * input_->height);
removed_indices_->resize (input_->width * input_->height);
}
else if (extract_removed_indices_)
{
- (*removed_indices_)[removed_indices_count++] = static_cast<int> (index);
+ (*removed_indices_)[removed_indices_count++] = index;
}
}
// If inside the cropbox
{
if (negative_ && extract_removed_indices_)
{
- (*removed_indices_)[removed_indices_count++] = static_cast<int> (index);
+ (*removed_indices_)[removed_indices_count++] = index;
}
else if (!negative_) {
indices[indices_count++] = index;
else
{
// Prepare a vector holding all indices
- std::vector<int> all_indices (input_->width * input_->height);
- for (int i = 0; i < static_cast<int>(all_indices.size ()); ++i)
+ Indices all_indices (input_->width * input_->height);
+ for (index_t i = 0; i < static_cast<index_t>(all_indices.size ()); ++i)
all_indices[i] = i;
- std::vector<int> indices = *indices_;
+ Indices indices = *indices_;
std::sort (indices.begin (), indices.end ());
 // Get the difference
- std::vector<int> remaining_indices;
+ Indices remaining_indices;
set_difference (all_indices.begin (), all_indices.end (), indices.begin (), indices.end (),
inserter (remaining_indices, remaining_indices.begin ()));
// Prepare the output and copy the data
- for (const int &remaining_index : remaining_indices)
+ for (const auto &remaining_index : remaining_indices)
for (std::size_t j = 0; j < output.fields.size(); ++j)
memcpy (&output.data[remaining_index * output.point_step + output.fields[j].offset],
&user_filter_value_, sizeof(float));
if (negative_)
{
// Prepare a vector holding all indices
- std::vector<int> all_indices (input_->width * input_->height);
- for (int i = 0; i < static_cast<int>(all_indices.size ()); ++i)
+ Indices all_indices (input_->width * input_->height);
+ for (index_t i = 0; i < static_cast<index_t>(all_indices.size ()); ++i)
all_indices[i] = i;
- std::vector<int> indices = *indices_;
+ Indices indices = *indices_;
std::sort (indices.begin (), indices.end ());
 // Get the difference
- std::vector<int> remaining_indices;
+ Indices remaining_indices;
set_difference (all_indices.begin (), all_indices.end (), indices.begin (), indices.end (),
inserter (remaining_indices, remaining_indices.begin ()));
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::ExtractIndices<pcl::PCLPointCloud2>::applyFilter (std::vector<int> &indices)
+pcl::ExtractIndices<pcl::PCLPointCloud2>::applyFilter (Indices &indices)
{
if (indices_->size () > (input_->width * input_->height))
{
if (extract_removed_indices_)
{
// Set up the full indices set
- std::vector<int> full_indices (input_->width * input_->height);
- for (int fii = 0; fii < static_cast<int> (full_indices.size ()); ++fii) // fii = full indices iterator
+ Indices full_indices (input_->width * input_->height);
+ for (index_t fii = 0; fii < static_cast<index_t> (full_indices.size ()); ++fii) // fii = full indices iterator
full_indices[fii] = fii;
// Set up the sorted input indices
- std::vector<int> sorted_input_indices = *indices_;
+ Indices sorted_input_indices = *indices_;
std::sort (sorted_input_indices.begin (), sorted_input_indices.end ());
// Store the difference in removed_indices
else // Inverted functionality
{
// Set up the full indices set
- std::vector<int> full_indices (input_->width * input_->height);
- for (int fii = 0; fii < static_cast<int> (full_indices.size ()); ++fii) // fii = full indices iterator
+ Indices full_indices (input_->width * input_->height);
+ for (index_t fii = 0; fii < static_cast<index_t> (full_indices.size ()); ++fii) // fii = full indices iterator
full_indices[fii] = fii;
// Set up the sorted input indices
- std::vector<int> sorted_input_indices = *indices_;
+ Indices sorted_input_indices = *indices_;
std::sort (sorted_input_indices.begin (), sorted_input_indices.end ());
// Store the difference in indices
* \param output the resultant filtered point cloud dataset
*/
void
-pcl::FilterIndices<pcl::PCLPointCloud2>::filter (std::vector<int> &indices)
+pcl::FilterIndices<pcl::PCLPointCloud2>::filter (Indices &indices)
{
if (!initCompute ())
return;
}
void
-pcl::PassThrough<pcl::PCLPointCloud2>::applyFilter (std::vector<int> &indices)
+pcl::PassThrough<pcl::PCLPointCloud2>::applyFilter (Indices &indices)
{
// If input is not present, we cannot filter
if (!input_)
indices.resize (indices_->size ());
removed_indices_->resize (indices_->size ());
int oii = 0, rii = 0; // oii = output indices iterator, rii = removed indices iterator
- const std::uint32_t xyz_offset[3] = {input_->fields[x_idx_].offset, input_->fields[y_idx_].offset, input_->fields[z_idx_].offset};
- PCL_DEBUG ("[pcl::%s<pcl::PCLPointCloud2>::applyFilter] Field offsets: x: %u, y: %u, z: %u.\n", filter_name_.c_str (), xyz_offset[0], xyz_offset[1], xyz_offset[2]);
+ const auto x_offset = input_->fields[x_idx_].offset,
+ y_offset = input_->fields[y_idx_].offset,
+ z_offset = input_->fields[z_idx_].offset;
+ PCL_DEBUG ("[pcl::%s<pcl::PCLPointCloud2>::applyFilter] Field offsets: x: %zu, y: %zu, z: %zu.\n", filter_name_.c_str (),
+ static_cast<std::size_t>(x_offset), static_cast<std::size_t>(y_offset), static_cast<std::size_t>(z_offset));
// Has a field name been specified?
if (filter_field_name_.empty ())
for (const auto ii : indices) // ii = input index
{
float pt[3];
- memcpy (&pt[0], &input_->data[ii * input_->point_step + xyz_offset[0]], sizeof(float));
- memcpy (&pt[1], &input_->data[ii * input_->point_step + xyz_offset[1]], sizeof(float));
- memcpy (&pt[2], &input_->data[ii * input_->point_step + xyz_offset[2]], sizeof(float));
+ memcpy (&pt[0], &input_->data[ii * input_->point_step + x_offset], sizeof(float));
+ memcpy (&pt[1], &input_->data[ii * input_->point_step + y_offset], sizeof(float));
+ memcpy (&pt[2], &input_->data[ii * input_->point_step + z_offset], sizeof(float));
// Non-finite entries are always passed to removed indices
if (!std::isfinite (pt[0]) ||
!std::isfinite (pt[1]) ||
for (const auto ii : indices) // ii = input index
{
float pt[3];
- memcpy (&pt[0], &input_->data[ii * input_->point_step + xyz_offset[0]], sizeof(float));
- memcpy (&pt[1], &input_->data[ii * input_->point_step + xyz_offset[1]], sizeof(float));
- memcpy (&pt[2], &input_->data[ii * input_->point_step + xyz_offset[2]], sizeof(float));
+ memcpy (&pt[0], &input_->data[ii * input_->point_step + x_offset], sizeof(float));
+ memcpy (&pt[1], &input_->data[ii * input_->point_step + y_offset], sizeof(float));
+ memcpy (&pt[2], &input_->data[ii * input_->point_step + z_offset], sizeof(float));
// Non-finite entries are always passed to removed indices
if (!std::isfinite (pt[0]) ||
!std::isfinite (pt[1]) ||
searcher_->setInputCloud (cloud);
// Allocate enough space to hold the results
- std::vector<int> nn_indices (indices_->size ());
+ Indices nn_indices (indices_->size ());
std::vector<float> nn_dists (indices_->size ());
// Copy the common fields
}
void
-pcl::RadiusOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (std::vector<int> &indices)
+pcl::RadiusOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (Indices &indices)
{
if (search_radius_ == 0.0)
{
searcher_->setInputCloud (cloud);
// The arrays to be used
- std::vector<int> nn_indices (indices_->size ());
+ Indices nn_indices (indices_->size ());
std::vector<float> nn_dists (indices_->size ());
indices.resize (indices_->size ());
removed_indices_->resize (indices_->size ());
void
pcl::RandomSample<pcl::PCLPointCloud2>::applyFilter (PCLPointCloud2 &output)
{
- std::vector<int> indices;
+ Indices indices;
if (keep_organized_)
{
bool temp = extract_removed_indices_;
///////////////////////////////////////////////////////////////////////////////
void
-pcl::RandomSample<pcl::PCLPointCloud2>::applyFilter (std::vector<int> &indices)
+pcl::RandomSample<pcl::PCLPointCloud2>::applyFilter (Indices &indices)
{
// Note: this function does not have to access input_ at all
std::size_t N = indices_->size ();
///////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::StatisticalOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (std::vector<int>& indices)
+pcl::StatisticalOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (Indices& indices)
{
// If fields x/y/z are not present, we cannot filter
if (x_idx_ == UNAVAILABLE || y_idx_ == UNAVAILABLE || z_idx_ == UNAVAILABLE)
tree_->setInputCloud (cloud);
// Allocate enough space to hold the results
- std::vector<int> nn_indices (mean_k_);
+ Indices nn_indices (mean_k_);
std::vector<float> nn_dists (mean_k_);
distances.resize (indices_->size ());
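
// A minimal usage sketch for StatisticalOutlierRemoval as touched above
// (standard PCL filter workflow; `cloud` / `filtered` are hypothetical):
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/point_types.h>

inline void
removeStatisticalOutliers (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
                           pcl::PointCloud<pcl::PointXYZ>& filtered)
{
  pcl::StatisticalOutlierRemoval<pcl::PointXYZ> sor;
  sor.setInputCloud (cloud);
  sor.setMeanK (50);               // neighbours used for the mean-distance estimate (mean_k_)
  sor.setStddevMulThresh (1.0);    // points beyond mean + 1.0 * stddev are removed
  sor.filter (filtered);
}
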
#include <pcl/common/io.h>
#include <pcl/filters/impl/voxel_grid.hpp>
#include <boost/sort/spreadsort/integer_sort.hpp>
+#include <array>
using Array4size_t = Eigen::Array<std::size_t, 4, 1>;
if( (dx*dy*dz) > static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()) )
{
- PCL_WARN("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.", getClassName().c_str());
+ PCL_WARN("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.\n", getClassName().c_str());
//output.width = output.height = 0;
//output.data.clear();
//return;
 // we need to skip all the same, adjacent idx values
std::size_t total = 0;
std::size_t index = 0;
+  // first_and_last_indices_vector[i] holds, for the i-th output point, the index in
+  // index_vector of the first point belonging to the corresponding voxel and the index
+  // of the first point that no longer belongs to it.
+ std::vector<std::pair<index_t, index_t> > first_and_last_indices_vector;
+ // Worst case size
+ first_and_last_indices_vector.reserve (index_vector.size ());
while (index < index_vector.size ())
{
std::size_t i = index + 1;
while (i < index_vector.size () && index_vector[i].idx == index_vector[index].idx)
++i;
- ++total;
+ if ((i - index) >= min_points_per_voxel_)
+ {
+ ++total;
+ first_and_last_indices_vector.emplace_back(index, i);
+ }
index = i;
}
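  // Illustrative example (values hypothetical): if the sorted index_vector carries
  // voxel ids {7, 7, 7, 9, 9, 12} and min_points_per_voxel_ == 2, the loop above yields
  // total == 2 and first_and_last_indices_vector == {(0, 3), (3, 5)}; the lone point in
  // voxel 12 is skipped.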
xyz_offset = Array4size_t (0, 4, 8, 12);
index=0;
- Eigen::VectorXf centroid = Eigen::VectorXf::Zero (centroid_size);
Eigen::VectorXf temporary = Eigen::VectorXf::Zero (centroid_size);
+ const std::array<index_t, 3> field_indices {x_idx_, y_idx_, z_idx_};
- for (std::size_t cp = 0; cp < index_vector.size ();)
+ for (const auto &cp : first_and_last_indices_vector)
{
- std::size_t point_offset = index_vector[cp].cloud_point_index * input_->point_step;
- // Do we need to process all the fields?
- if (!downsample_all_data_)
- {
- memcpy (&pt[0], &input_->data[point_offset+input_->fields[x_idx_].offset], sizeof (float));
- memcpy (&pt[1], &input_->data[point_offset+input_->fields[y_idx_].offset], sizeof (float));
- memcpy (&pt[2], &input_->data[point_offset+input_->fields[z_idx_].offset], sizeof (float));
- centroid[0] = pt[0];
- centroid[1] = pt[1];
- centroid[2] = pt[2];
- centroid[3] = 0;
- }
- else
+  // Calculate the centroid: sum the values from all input points that have the same idx value in the index_vector array
+ index_t first_index = cp.first;
+ index_t last_index = cp.second;
+
+ // index is centroid final position in resulting PointCloud
+ if (save_leaf_layout_)
+ leaf_layout_[index_vector[first_index].idx] = index;
+
+  // Limit downsampling to the x/y/z coordinates only
+ if (!downsample_all_data_)
{
- // ---[ RGB special case
- // fill extra r/g/b centroid field
- if (rgba_index >= 0)
+ Eigen::Vector4f centroid (Eigen::Vector4f::Zero ());
+
+ for (index_t li = first_index; li < last_index; ++li)
{
- pcl::RGB rgb;
- memcpy (&rgb, &input_->data[point_offset + input_->fields[rgba_index].offset], sizeof (RGB));
- centroid[centroid_size-4] = rgb.r;
- centroid[centroid_size-3] = rgb.g;
- centroid[centroid_size-2] = rgb.b;
- centroid[centroid_size-1] = rgb.a;
+ std::size_t point_offset = index_vector[li].cloud_point_index * input_->point_step;
+ for (uint8_t ind=0; ind<3; ++ind)
+ {
+ memcpy (&pt[ind], &input_->data[point_offset+input_->fields[field_indices[ind]].offset], sizeof (float));
+ }
+ centroid += pt;
}
- // Copy all the fields
- for (std::size_t d = 0; d < input_->fields.size (); ++d)
- memcpy (¢roid[d], &input_->data[point_offset + input_->fields[d].offset], field_sizes_[d]);
- }
- std::size_t i = cp + 1;
- while (i < index_vector.size () && index_vector[i].idx == index_vector[cp].idx)
- {
- std::size_t point_offset = index_vector[i].cloud_point_index * input_->point_step;
- if (!downsample_all_data_)
+ centroid /= static_cast<float> (last_index - first_index);
+ for (uint8_t ind=0; ind<3; ++ind)
{
- memcpy (&pt[0], &input_->data[point_offset+input_->fields[x_idx_].offset], sizeof (float));
- memcpy (&pt[1], &input_->data[point_offset+input_->fields[y_idx_].offset], sizeof (float));
- memcpy (&pt[2], &input_->data[point_offset+input_->fields[z_idx_].offset], sizeof (float));
- centroid[0] += pt[0];
- centroid[1] += pt[1];
- centroid[2] += pt[2];
+ memcpy (&output.data[xyz_offset[ind]], ¢roid[ind], sizeof (float));
}
- else
- {
+ xyz_offset += output.point_step;
+ }
+ else
+ {
+
+ Eigen::VectorXf centroid = Eigen::VectorXf::Zero (centroid_size);
+ // fill in the accumulator with leaf points
+ for (index_t li = first_index; li < last_index; ++li) {
+ std::size_t point_offset = index_vector[li].cloud_point_index * input_->point_step;
// ---[ RGB special case
// fill extra r/g/b centroid field
if (rgba_index >= 0)
{
- pcl::RGB rgb;
- memcpy (&rgb, &input_->data[point_offset + input_->fields[rgba_index].offset], sizeof (RGB));
- temporary[centroid_size-4] = rgb.r;
- temporary[centroid_size-3] = rgb.g;
- temporary[centroid_size-2] = rgb.b;
- temporary[centroid_size-1] = rgb.a;
+ pcl::RGB rgb;
+ memcpy (&rgb, &input_->data[point_offset + input_->fields[rgba_index].offset], sizeof (RGB));
+ temporary[centroid_size-4] = rgb.r;
+ temporary[centroid_size-3] = rgb.g;
+ temporary[centroid_size-2] = rgb.b;
+ temporary[centroid_size-1] = rgb.a;
}
// Copy all the fields
for (std::size_t d = 0; d < input_->fields.size (); ++d)
memcpy (&temporary[d], &input_->data[point_offset + input_->fields[d].offset], field_sizes_[d]);
centroid += temporary;
}
- ++i;
- }
-
- // Save leaf layout information for fast access to cells relative to current position
- if (save_leaf_layout_)
- leaf_layout_[index_vector[cp].idx] = static_cast<int> (index);
-
- // Normalize the centroid
- centroid /= static_cast<float> (i - cp);
-
- // Do we need to process all the fields?
- if (!downsample_all_data_)
- {
- // Copy the data
- memcpy (&output.data[xyz_offset[0]], ¢roid[0], sizeof (float));
- memcpy (&output.data[xyz_offset[1]], ¢roid[1], sizeof (float));
- memcpy (&output.data[xyz_offset[2]], ¢roid[2], sizeof (float));
- xyz_offset += output.point_step;
- }
- else
- {
+ centroid /= static_cast<float> (last_index - first_index);
+
std::size_t point_offset = index * output.point_step;
// Copy all the fields
for (std::size_t d = 0; d < output.fields.size (); ++d)
memcpy (&output.data[point_offset + output.fields[rgba_index].offset], &rgb, sizeof (float));
}
}
- cp = i;
+
++index;
}
}
*/
#include <pcl/filters/impl/voxel_grid_covariance.hpp>
-#include <pcl/filters/impl/voxel_grid.hpp>
#ifndef PCL_NO_PRECOMPILE
#include <pcl/point_types.h>
{
PCL_WARN ("[pcl::%s::applyFilter] No input dataset given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if( (dx*dy*dz) > static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()) )
{
- PCL_WARN("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.", getClassName().c_str());
+ PCL_WARN("[pcl::%s::applyFilter] Leaf size is too small for the input dataset. Integer indices would overflow.\n", getClassName().c_str());
output.clear();
return;
}
}
// Fourth pass: compute centroids, insert them into their final position
- output.points.resize (total);
+ output.resize (total);
if (save_leaf_layout_)
{
try
#pragma once
#ifdef __GNUC__
-# pragma GCC system_header
+#pragma GCC system_header
#endif
#include <boost/operators.hpp>
#pragma once
#ifdef __GNUC__
-# pragma GCC system_header
+#pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
#include <Eigen/Core>
#include <Eigen/StdVector>
#include <vector>
-namespace pcl
+namespace pcl {
+namespace geometry {
+/**
+ * \brief Get a collection of boundary half-edges for the input mesh.
+ * \param[in] mesh The input mesh.
+ * \param[out] boundary_he_collection Collection of boundary half-edges. Each element in
+ * the vector is one connected boundary. The whole boundary is the union of all
+ * elements.
+ * \param[in] expected_size If you already know the size of the longest
+ * boundary, you can pass it here. Defaults to 3 (the minimum possible boundary).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+void
+getBoundBoundaryHalfEdges(
+ const MeshT& mesh,
+ std::vector<typename MeshT::HalfEdgeIndices>& boundary_he_collection,
+ const std::size_t expected_size = 3)
{
- namespace geometry
- {
- /** \brief Get a collection of boundary half-edges for the input mesh.
- * \param[in] mesh The input mesh.
- * \param[out] boundary_he_collection Collection of boundary half-edges. Each element in the vector is one connected boundary. The whole boundary is the union of all elements.
- * \param [in] expected_size If you already know the size of the longest boundary you can tell this here. Defaults to 3 (minimum possible boundary).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT> void
- getBoundBoundaryHalfEdges (const MeshT& mesh,
- std::vector <typename MeshT::HalfEdgeIndices>& boundary_he_collection,
- const std::size_t expected_size = 3)
- {
- using Mesh = MeshT;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
- using HalfEdgeIndices = typename Mesh::HalfEdgeIndices;
- using IHEAFC = typename Mesh::InnerHalfEdgeAroundFaceCirculator;
+ using Mesh = MeshT;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+ using HalfEdgeIndices = typename Mesh::HalfEdgeIndices;
+ using IHEAFC = typename Mesh::InnerHalfEdgeAroundFaceCirculator;
- boundary_he_collection.clear ();
+ boundary_he_collection.clear();
- HalfEdgeIndices boundary_he; boundary_he.reserve (expected_size);
- std::vector <bool> visited (mesh.sizeEdges (), false);
- IHEAFC circ, circ_end;
+ HalfEdgeIndices boundary_he;
+ boundary_he.reserve(expected_size);
+ std::vector<bool> visited(mesh.sizeEdges(), false);
+ IHEAFC circ, circ_end;
- for (HalfEdgeIndex i (0); i<HalfEdgeIndex (mesh.sizeHalfEdges ()); ++i)
- {
- if (mesh.isBoundary (i) && !visited [pcl::geometry::toEdgeIndex (i).get ()])
- {
- boundary_he.clear ();
+ for (HalfEdgeIndex i(0); i < HalfEdgeIndex(mesh.sizeHalfEdges()); ++i) {
+ if (mesh.isBoundary(i) && !visited[pcl::geometry::toEdgeIndex(i).get()]) {
+ boundary_he.clear();
- circ = mesh.getInnerHalfEdgeAroundFaceCirculator (i);
- circ_end = circ;
- do
- {
- visited [pcl::geometry::toEdgeIndex (circ.getTargetIndex ()).get ()] = true;
- boundary_he.push_back (circ.getTargetIndex ());
- } while (++circ != circ_end);
+ circ = mesh.getInnerHalfEdgeAroundFaceCirculator(i);
+ circ_end = circ;
+ do {
+ visited[pcl::geometry::toEdgeIndex(circ.getTargetIndex()).get()] = true;
+ boundary_he.push_back(circ.getTargetIndex());
+ } while (++circ != circ_end);
- boundary_he_collection.push_back (boundary_he);
- }
- }
+ boundary_he_collection.push_back(boundary_he);
}
+ }
+}
- } // End namespace geometry
+} // End namespace geometry
} // End namespace pcl
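
// A minimal usage sketch for the helper above (MeshT is assumed to be any half-edge
// mesh type from pcl/geometry, e.g. an instantiation of pcl::geometry::TriangleMesh;
// the include below is an assumption about the header this function lives in):
#include <pcl/geometry/get_boundary.h>
#include <vector>

template <class MeshT>
std::size_t
countConnectedBoundaries (const MeshT& mesh)
{
  std::vector<typename MeshT::HalfEdgeIndices> boundaries;
  pcl::geometry::getBoundBoundaryHalfEdges (mesh, boundaries);
  return boundaries.size ();   // one entry per connected boundary loop
}
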
#include <pcl/geometry/polygon_operations.h>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-template<typename PointT> void
-pcl::approximatePolygon (const PlanarPolygon<PointT>& polygon, PlanarPolygon<PointT>& approx_polygon, float threshold, bool refine, bool closed)
+template <typename PointT>
+void
+pcl::approximatePolygon(const PlanarPolygon<PointT>& polygon,
+ PlanarPolygon<PointT>& approx_polygon,
+ float threshold,
+ bool refine,
+ bool closed)
{
- const Eigen::Vector4f& coefficients = polygon.getCoefficients ();
- const typename pcl::PointCloud<PointT>::VectorType &contour = polygon.getContour ();
-
- Eigen::Vector3f rotation_axis (coefficients[1], -coefficients[0], 0.0f);
- rotation_axis.normalize ();
+ const Eigen::Vector4f& coefficients = polygon.getCoefficients();
+ const typename pcl::PointCloud<PointT>::VectorType& contour = polygon.getContour();
- float rotation_angle = acosf (coefficients [2]);
- Eigen::Affine3f transformation = Eigen::Translation3f (0, 0, coefficients [3]) * Eigen::AngleAxisf (rotation_angle, rotation_axis);
+ Eigen::Vector3f rotation_axis(coefficients[1], -coefficients[0], 0.0f);
+ rotation_axis.normalize();
- typename pcl::PointCloud<PointT>::VectorType polygon2D (contour.size ());
- for (std::size_t pIdx = 0; pIdx < polygon2D.size (); ++pIdx)
- polygon2D [pIdx].getVector3fMap () = transformation * contour [pIdx].getVector3fMap ();
+ float rotation_angle = acosf(coefficients[2]);
+ Eigen::Affine3f transformation = Eigen::Translation3f(0, 0, coefficients[3]) *
+ Eigen::AngleAxisf(rotation_angle, rotation_axis);
+
+ typename pcl::PointCloud<PointT>::VectorType polygon2D(contour.size());
+ for (std::size_t pIdx = 0; pIdx < polygon2D.size(); ++pIdx)
+ polygon2D[pIdx].getVector3fMap() = transformation * contour[pIdx].getVector3fMap();
typename pcl::PointCloud<PointT>::VectorType approx_polygon2D;
- approximatePolygon2D<PointT> (polygon2D, approx_polygon2D, threshold, refine, closed);
-
- typename pcl::PointCloud<PointT>::VectorType &approx_contour = approx_polygon.getContour ();
- approx_contour.resize (approx_polygon2D.size ());
-
- Eigen::Affine3f inv_transformation = transformation.inverse ();
- for (std::size_t pIdx = 0; pIdx < approx_polygon2D.size (); ++pIdx)
- approx_contour [pIdx].getVector3fMap () = inv_transformation * approx_polygon2D [pIdx].getVector3fMap ();
+ approximatePolygon2D<PointT>(polygon2D, approx_polygon2D, threshold, refine, closed);
+
+ typename pcl::PointCloud<PointT>::VectorType& approx_contour =
+ approx_polygon.getContour();
+ approx_contour.resize(approx_polygon2D.size());
+
+ Eigen::Affine3f inv_transformation = transformation.inverse();
+ for (std::size_t pIdx = 0; pIdx < approx_polygon2D.size(); ++pIdx)
+ approx_contour[pIdx].getVector3fMap() =
+ inv_transformation * approx_polygon2D[pIdx].getVector3fMap();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> void
-pcl::approximatePolygon2D (const typename pcl::PointCloud<PointT>::VectorType &polygon,
- typename pcl::PointCloud<PointT>::VectorType &approx_polygon,
- float threshold, bool refine, bool closed)
+template <typename PointT>
+void
+pcl::approximatePolygon2D(const typename pcl::PointCloud<PointT>::VectorType& polygon,
+ typename pcl::PointCloud<PointT>::VectorType& approx_polygon,
+ float threshold,
+ bool refine,
+ bool closed)
{
- approx_polygon.clear ();
- if (polygon.size () < 3)
+ approx_polygon.clear();
+ if (polygon.size() < 3)
return;
-
- std::vector<std::pair<unsigned, unsigned> > intervals;
- std::pair<unsigned,unsigned> interval (0, 0);
-
- if (closed)
- {
+
+ std::vector<std::pair<unsigned, unsigned>> intervals;
+ std::pair<unsigned, unsigned> interval(0, 0);
+
+ if (closed) {
float max_distance = .0f;
- for (std::size_t idx = 1; idx < polygon.size (); ++idx)
- {
- float distance = (polygon [0].x - polygon [idx].x) * (polygon [0].x - polygon [idx].x) +
- (polygon [0].y - polygon [idx].y) * (polygon [0].y - polygon [idx].y);
+ for (std::size_t idx = 1; idx < polygon.size(); ++idx) {
+ float distance =
+ (polygon[0].x - polygon[idx].x) * (polygon[0].x - polygon[idx].x) +
+ (polygon[0].y - polygon[idx].y) * (polygon[0].y - polygon[idx].y);
- if (distance > max_distance)
- {
+ if (distance > max_distance) {
max_distance = distance;
interval.second = idx;
}
}
- for (std::size_t idx = 1; idx < polygon.size (); ++idx)
- {
- float distance = (polygon [interval.second].x - polygon [idx].x) * (polygon [interval.second].x - polygon [idx].x) +
- (polygon [interval.second].y - polygon [idx].y) * (polygon [interval.second].y - polygon [idx].y);
+ for (std::size_t idx = 1; idx < polygon.size(); ++idx) {
+ float distance = (polygon[interval.second].x - polygon[idx].x) *
+ (polygon[interval.second].x - polygon[idx].x) +
+ (polygon[interval.second].y - polygon[idx].y) *
+ (polygon[interval.second].y - polygon[idx].y);
- if (distance > max_distance)
- {
+ if (distance > max_distance) {
max_distance = distance;
interval.first = idx;
}
if (max_distance < threshold * threshold)
return;
- intervals.push_back (interval);
- std::swap (interval.first, interval.second);
- intervals.push_back (interval);
+ intervals.push_back(interval);
+ std::swap(interval.first, interval.second);
+ intervals.push_back(interval);
}
- else
- {
+ else {
interval.first = 0;
- interval.second = static_cast<unsigned int> (polygon.size ()) - 1;
- intervals.push_back (interval);
+ interval.second = static_cast<unsigned int>(polygon.size()) - 1;
+ intervals.push_back(interval);
}
-
+
std::vector<unsigned> result;
// recursively refine
- while (!intervals.empty ())
- {
- std::pair<unsigned, unsigned>& currentInterval = intervals.back ();
- float line_x = polygon [currentInterval.first].y - polygon [currentInterval.second].y;
- float line_y = polygon [currentInterval.second].x - polygon [currentInterval.first].x;
- float line_d = polygon [currentInterval.first].x * polygon [currentInterval.second].y - polygon [currentInterval.first].y * polygon [currentInterval.second].x;
-
- float linelen = 1.0f / std::sqrt (line_x * line_x + line_y * line_y);
-
+ while (!intervals.empty()) {
+ std::pair<unsigned, unsigned>& currentInterval = intervals.back();
+ float line_x = polygon[currentInterval.first].y - polygon[currentInterval.second].y;
+ float line_y = polygon[currentInterval.second].x - polygon[currentInterval.first].x;
+ float line_d =
+ polygon[currentInterval.first].x * polygon[currentInterval.second].y -
+ polygon[currentInterval.first].y * polygon[currentInterval.second].x;
+
+ float linelen = 1.0f / std::sqrt(line_x * line_x + line_y * line_y);
+
line_x *= linelen;
line_y *= linelen;
line_d *= linelen;
-
+
float max_distance = 0.0;
unsigned first_index = currentInterval.first + 1;
unsigned max_index = 0;
// => 0-crossing
- if (currentInterval.first > currentInterval.second)
- {
- for (std::size_t idx = first_index; idx < polygon.size(); idx++)
- {
- float distance = std::abs (line_x * polygon[idx].x + line_y * polygon[idx].y + line_d);
- if (distance > max_distance)
- {
+ if (currentInterval.first > currentInterval.second) {
+ for (std::size_t idx = first_index; idx < polygon.size(); idx++) {
+ float distance =
+ std::abs(line_x * polygon[idx].x + line_y * polygon[idx].y + line_d);
+ if (distance > max_distance) {
max_distance = distance;
- max_index = idx;
+ max_index = idx;
}
}
first_index = 0;
}
- for (unsigned int idx = first_index; idx < currentInterval.second; idx++)
- {
- float distance = std::abs (line_x * polygon[idx].x + line_y * polygon[idx].y + line_d);
- if (distance > max_distance)
- {
+ for (unsigned int idx = first_index; idx < currentInterval.second; idx++) {
+ float distance =
+ std::abs(line_x * polygon[idx].x + line_y * polygon[idx].y + line_d);
+ if (distance > max_distance) {
max_distance = distance;
- max_index = idx;
+ max_index = idx;
}
}
- if (max_distance > threshold)
- {
- std::pair<unsigned, unsigned> interval (max_index, currentInterval.second);
+ if (max_distance > threshold) {
+ std::pair<unsigned, unsigned> interval(max_index, currentInterval.second);
currentInterval.second = max_index;
- intervals.push_back (interval);
+ intervals.push_back(interval);
}
- else
- {
- result.push_back (currentInterval.second);
- intervals.pop_back ();
+ else {
+ result.push_back(currentInterval.second);
+ intervals.pop_back();
}
}
-
- approx_polygon.reserve (result.size ());
- if (refine)
- {
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > lines (result.size ());
- std::reverse (result.begin (), result.end ());
- for (std::size_t rIdx = 0; rIdx < result.size (); ++rIdx)
- {
+
+ approx_polygon.reserve(result.size());
+ if (refine) {
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>> lines(
+ result.size());
+ std::reverse(result.begin(), result.end());
+ for (std::size_t rIdx = 0; rIdx < result.size(); ++rIdx) {
std::size_t nIdx = rIdx + 1;
- if (nIdx == result.size ())
+ if (nIdx == result.size())
nIdx = 0;
-
- Eigen::Vector2f centroid = Eigen::Vector2f::Zero ();
- Eigen::Matrix2f covariance = Eigen::Matrix2f::Zero ();
+
+ Eigen::Vector2f centroid = Eigen::Vector2f::Zero();
+ Eigen::Matrix2f covariance = Eigen::Matrix2f::Zero();
std::size_t pIdx = result[rIdx];
unsigned num_points = 0;
- if (pIdx > result[nIdx])
- {
- num_points = static_cast<unsigned> (polygon.size ()) - pIdx;
- for (; pIdx < polygon.size (); ++pIdx)
- {
- covariance.coeffRef (0) += polygon [pIdx].x * polygon [pIdx].x;
- covariance.coeffRef (1) += polygon [pIdx].x * polygon [pIdx].y;
- covariance.coeffRef (3) += polygon [pIdx].y * polygon [pIdx].y;
- centroid [0] += polygon [pIdx].x;
- centroid [1] += polygon [pIdx].y;
+ if (pIdx > result[nIdx]) {
+ num_points = static_cast<unsigned>(polygon.size()) - pIdx;
+ for (; pIdx < polygon.size(); ++pIdx) {
+ covariance.coeffRef(0) += polygon[pIdx].x * polygon[pIdx].x;
+ covariance.coeffRef(1) += polygon[pIdx].x * polygon[pIdx].y;
+ covariance.coeffRef(3) += polygon[pIdx].y * polygon[pIdx].y;
+ centroid[0] += polygon[pIdx].x;
+ centroid[1] += polygon[pIdx].y;
}
pIdx = 0;
}
-
+
num_points += result[nIdx] - pIdx;
- for (; pIdx < result[nIdx]; ++pIdx)
- {
- covariance.coeffRef (0) += polygon [pIdx].x * polygon [pIdx].x;
- covariance.coeffRef (1) += polygon [pIdx].x * polygon [pIdx].y;
- covariance.coeffRef (3) += polygon [pIdx].y * polygon [pIdx].y;
- centroid [0] += polygon [pIdx].x;
- centroid [1] += polygon [pIdx].y;
+ for (; pIdx < result[nIdx]; ++pIdx) {
+ covariance.coeffRef(0) += polygon[pIdx].x * polygon[pIdx].x;
+ covariance.coeffRef(1) += polygon[pIdx].x * polygon[pIdx].y;
+ covariance.coeffRef(3) += polygon[pIdx].y * polygon[pIdx].y;
+ centroid[0] += polygon[pIdx].x;
+ centroid[1] += polygon[pIdx].y;
}
-
- covariance.coeffRef (2) = covariance.coeff (1);
-
- float norm = 1.0f / float (num_points);
+
+ covariance.coeffRef(2) = covariance.coeff(1);
+
+ float norm = 1.0f / float(num_points);
centroid *= norm;
covariance *= norm;
- covariance.coeffRef (0) -= centroid [0] * centroid [0];
- covariance.coeffRef (1) -= centroid [0] * centroid [1];
- covariance.coeffRef (3) -= centroid [1] * centroid [1];
-
+ covariance.coeffRef(0) -= centroid[0] * centroid[0];
+ covariance.coeffRef(1) -= centroid[0] * centroid[1];
+ covariance.coeffRef(3) -= centroid[1] * centroid[1];
+
float eval;
Eigen::Vector2f normal;
- eigen22 (covariance, eval, normal);
+ eigen22(covariance, eval, normal);
// select the one which is more "parallel" to the original line
Eigen::Vector2f direction;
- direction [0] = polygon[result[nIdx]].x - polygon[result[rIdx]].x;
- direction [1] = polygon[result[nIdx]].y - polygon[result[rIdx]].y;
- direction.normalize ();
-
- if (std::abs (direction.dot (normal)) > float(M_SQRT1_2))
- {
- std::swap (normal [0], normal [1]);
- normal [0] = -normal [0];
+ direction[0] = polygon[result[nIdx]].x - polygon[result[rIdx]].x;
+ direction[1] = polygon[result[nIdx]].y - polygon[result[rIdx]].y;
+ direction.normalize();
+
+ if (std::abs(direction.dot(normal)) > float(M_SQRT1_2)) {
+ std::swap(normal[0], normal[1]);
+ normal[0] = -normal[0];
}
-
+
// needs to be on the left side of the edge
- if (direction [0] * normal [1] < direction [1] * normal [0])
+ if (direction[0] * normal[1] < direction[1] * normal[0])
normal *= -1.0;
-
- lines [rIdx].head<2> ().matrix () = normal;
- lines [rIdx] [2] = -normal.dot (centroid);
+
+ lines[rIdx].head<2>().matrix() = normal;
+ lines[rIdx][2] = -normal.dot(centroid);
}
-
+
float threshold2 = threshold * threshold;
- for (std::size_t rIdx = 0; rIdx < lines.size (); ++rIdx)
- {
+ for (std::size_t rIdx = 0; rIdx < lines.size(); ++rIdx) {
std::size_t nIdx = rIdx + 1;
- if (nIdx == result.size ())
- nIdx = 0;
-
- Eigen::Vector3f vertex = lines [rIdx].cross (lines [nIdx]);
- vertex /= vertex [2];
- vertex [2] = 0.0;
-
- PointT point;
- // test whether we need another edge since the intersection point is too far away from the original vertex
- Eigen::Vector3f pq = polygon [result[nIdx]].getVector3fMap () - vertex;
- pq [2] = 0.0;
-
- float distance = pq.squaredNorm ();
- if (distance > threshold2)
- {
+ if (nIdx == result.size())
+ nIdx = 0;
+
+ Eigen::Vector3f vertex = lines[rIdx].cross(lines[nIdx]);
+ vertex /= vertex[2];
+ vertex[2] = 0.0;
+
+ PointT point;
+ // test whether we need another edge since the intersection point is too far away
+ // from the original vertex
+ Eigen::Vector3f pq = polygon[result[nIdx]].getVector3fMap() - vertex;
+ pq[2] = 0.0;
+
+ float distance = pq.squaredNorm();
+ if (distance > threshold2) {
// test whether the old point is inside the new polygon or outside
- if ((pq [0] * lines [rIdx] [0] + pq [1] * lines [rIdx] [1] < 0.0) &&
- (pq [0] * lines [nIdx] [0] + pq [1] * lines [nIdx] [1] < 0.0) )
- {
- float distance1 = lines [rIdx] [0] * polygon[result[nIdx]].x + lines [rIdx] [1] * polygon[result[nIdx]].y + lines [rIdx] [2];
- float distance2 = lines [nIdx] [0] * polygon[result[nIdx]].x + lines [nIdx] [1] * polygon[result[nIdx]].y + lines [nIdx] [2];
+ if ((pq[0] * lines[rIdx][0] + pq[1] * lines[rIdx][1] < 0.0) &&
+ (pq[0] * lines[nIdx][0] + pq[1] * lines[nIdx][1] < 0.0)) {
+ float distance1 = lines[rIdx][0] * polygon[result[nIdx]].x +
+ lines[rIdx][1] * polygon[result[nIdx]].y + lines[rIdx][2];
+ float distance2 = lines[nIdx][0] * polygon[result[nIdx]].x +
+ lines[nIdx][1] * polygon[result[nIdx]].y + lines[nIdx][2];
- point.x = polygon[result[nIdx]].x - distance1 * lines [rIdx] [0];
- point.y = polygon[result[nIdx]].y - distance1 * lines [rIdx] [1];
+ point.x = polygon[result[nIdx]].x - distance1 * lines[rIdx][0];
+ point.y = polygon[result[nIdx]].y - distance1 * lines[rIdx][1];
- approx_polygon.push_back (point);
+ approx_polygon.push_back(point);
- vertex [0] = polygon[result[nIdx]].x - distance2 * lines [nIdx] [0];
- vertex [1] = polygon[result[nIdx]].y - distance2 * lines [nIdx] [1];
+ vertex[0] = polygon[result[nIdx]].x - distance2 * lines[nIdx][0];
+ vertex[1] = polygon[result[nIdx]].y - distance2 * lines[nIdx][1];
}
}
- point.getVector3fMap () = vertex;
- approx_polygon.push_back (point);
+ point.getVector3fMap() = vertex;
+ approx_polygon.push_back(point);
}
}
- else
- {
- // we have a new polygon in results, but inverted (clockwise <-> counter-clockwise)
- for (std::vector<unsigned>::reverse_iterator it = result.rbegin (); it != result.rend (); ++it)
- approx_polygon.push_back (polygon [*it]);
+ else {
+ // we have a new polygon in results, but inverted (clockwise <-> counter-clockwise)
+ for (std::vector<unsigned>::reverse_iterator it = result.rbegin();
+ it != result.rend();
+ ++it)
+ approx_polygon.push_back(polygon[*it]);
}
}
#include "organized_index_iterator.h"
-namespace pcl
-{
-/** \brief Organized Index Iterator for iterating over the "pixels" for a given line using the Bresenham algorithm.
- * Supports 4 and 8 neighborhood connectivity
- * \note iterator does not visit the given end-point (on purpose).
- * \author Suat Gedikli <gedikli@willowgarage.com>
- * \ingroup geometry
- */
-class LineIterator : public OrganizedIndexIterator
-{
- public:
- /** \brief Neighborhood connectivity */
- enum Neighborhood {Neighbor4 = 4, Neighbor8 = 8};
- public:
- /** \brief Constructor
- * \param x_start column of the start pixel of the line
- * \param y_start row of the start pixel of the line
- * \param x_end column of the end pixel of the line
- * \param y_end row of the end pixel of the line
- * \param width width of the organized structure e.g. image/cloud/map etc..
- * \param neighborhood connectivity of the neighborhood
- */
- LineIterator (unsigned x_start, unsigned y_start, unsigned x_end, unsigned y_end, unsigned width, const Neighborhood& neighborhood = Neighbor8);
-
- /** \brief Destructor*/
- ~LineIterator ();
-
- void operator ++ () override;
- unsigned getRowIndex () const override;
- unsigned getColumnIndex () const override;
- bool isValid () const override;
- void reset () override;
- protected:
- /** \brief initializes the variables for the Bresenham algorithm
- * \param[in] neighborhood connectivity to the neighborhood. Either 4 or 8
- */
- void init (const Neighborhood& neighborhood);
-
- /** \brief current column index*/
- unsigned x_;
-
- /** \brief current row index*/
- unsigned y_;
-
- /** \brief column index of first pixel/point*/
- unsigned x_start_;
-
- /** \brief row index of first pixel/point*/
- unsigned y_start_;
-
- /** \brief column index of end pixel/point*/
- unsigned x_end_;
-
- /** \brief row index of end pixel/point*/
- unsigned y_end_;
-
- // calculated values
- /** \brief current distance to the line*/
- int error_;
-
- /** \brief error threshold*/
- int error_max_;
-
- /** \brief increment of error (distance) value in case of an y-step (if dx > dy)*/
- int error_minus_;
-
- /** \brief increment of error (distance) value in case of just an x-step (if dx > dy)*/
- int error_plus_;
-
- /** \brief increment of column index in case of just an x-step (if dx > dy)*/
- int x_plus_;
-
- /** \brief increment of row index in case of just an x-step (if dx > dy)*/
- int y_plus_;
-
- /** \brief increment of column index in case of just an y-step (if dx > dy)*/
- int x_minus_;
-
- /** \brief increment of row index in case of just an y-step (if dx > dy)*/
- int y_minus_;
-
- /** \brief increment pixel/point index in case of just an x-step (if dx > dy)*/
- int index_plus_;
-
- /** \brief increment pixel/point index in case of just an y-step (if dx > dy)*/
- int index_minus_;
+namespace pcl {
+/**
+ * \brief Organized Index Iterator for iterating over the "pixels" for a given line
+ * using the Bresenham algorithm. Supports 4 and 8 neighborhood connectivity
+ * \note the iterator does not visit the given end-point (on purpose).
+ * \author Suat Gedikli <gedikli@willowgarage.com>
+ * \ingroup geometry
+ */
+class LineIterator : public OrganizedIndexIterator {
+public:
+ /** \brief Neighborhood connectivity */
+ enum Neighborhood { Neighbor4 = 4, Neighbor8 = 8 };
+
+public:
+ /**
+ * \brief Constructor
+ * \param x_start column of the start pixel of the line
+ * \param y_start row of the start pixel of the line
+ * \param x_end column of the end pixel of the line
+ * \param y_end row of the end pixel of the line
+   * \param width width of the organized structure, e.g. image/cloud/map etc.
+ * \param neighborhood connectivity of the neighborhood
+ */
+ LineIterator(unsigned x_start,
+ unsigned y_start,
+ unsigned x_end,
+ unsigned y_end,
+ unsigned width,
+ const Neighborhood& neighborhood = Neighbor8);
+
+ /** \brief Destructor*/
+ ~LineIterator();
+
+ void
+ operator++() override;
+ unsigned
+ getRowIndex() const override;
+ unsigned
+ getColumnIndex() const override;
+ bool
+ isValid() const override;
+ void
+ reset() override;
+
+protected:
+ /**
+ * \brief initializes the variables for the Bresenham algorithm
+   * \param[in] neighborhood connectivity of the neighborhood, either 4 or 8
+ */
+ void
+ init(const Neighborhood& neighborhood);
+
+ /** \brief current column index*/
+ unsigned x_;
+
+ /** \brief current row index*/
+ unsigned y_;
+
+ /** \brief column index of first pixel/point*/
+ unsigned x_start_;
+
+ /** \brief row index of first pixel/point*/
+ unsigned y_start_;
+
+ /** \brief column index of end pixel/point*/
+ unsigned x_end_;
+
+ /** \brief row index of end pixel/point*/
+ unsigned y_end_;
+
+ // calculated values
+ /** \brief current distance to the line*/
+ int error_;
+
+ /** \brief error threshold*/
+ int error_max_;
+
+  /** \brief increment of error (distance) value in case of a y-step (if dx > dy)*/
+ int error_minus_;
+
+ /** \brief increment of error (distance) value in case of just an x-step (if dx >
+ * dy)*/
+ int error_plus_;
+
+ /** \brief increment of column index in case of just an x-step (if dx > dy)*/
+ int x_plus_;
+
+ /** \brief increment of row index in case of just an x-step (if dx > dy)*/
+ int y_plus_;
+
+  /** \brief increment of column index in case of just a y-step (if dx > dy)*/
+ int x_minus_;
+
+  /** \brief increment of row index in case of just a y-step (if dx > dy)*/
+ int y_minus_;
+
+  /** \brief increment of pixel/point index in case of just an x-step (if dx > dy)*/
+ int index_plus_;
+
+  /** \brief increment of pixel/point index in case of just a y-step (if dx > dy)*/
+ int index_minus_;
};
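+
+// Usage sketch (illustrative only, not part of the original header; the names below
+// are placeholders): walk the pixels of a line inside an organized structure of
+// width 640 and collect their row/column coordinates. Remember that the end pixel
+// itself is intentionally not visited.
+//
+//   pcl::LineIterator line (10, 5, 100, 42, 640, pcl::LineIterator::Neighbor8);
+//   std::vector<std::pair<unsigned, unsigned>> cells;
+//   for (; line.isValid (); ++line)
+//     cells.emplace_back (line.getRowIndex (), line.getColumnIndex ());
+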
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
-
////////////////////////////////////////////////////////////////////////////////
-inline LineIterator::LineIterator (unsigned x_start, unsigned y_start, unsigned x_end, unsigned y_end, unsigned width, const Neighborhood& neighborhood)
-: OrganizedIndexIterator (width)
-, x_start_ (x_start)
-, y_start_ (y_start)
-, x_end_ (x_end)
-, y_end_ (y_end)
+inline LineIterator::LineIterator(unsigned x_start,
+ unsigned y_start,
+ unsigned x_end,
+ unsigned y_end,
+ unsigned width,
+ const Neighborhood& neighborhood)
+: OrganizedIndexIterator(width)
+, x_start_(x_start)
+, y_start_(y_start)
+, x_end_(x_end)
+, y_end_(y_end)
{
- init (neighborhood);
+ init(neighborhood);
}
////////////////////////////////////////////////////////////////////////////////
-inline LineIterator::~LineIterator ()
-{
-}
+inline LineIterator::~LineIterator() {}
////////////////////////////////////////////////////////////////////////////////
inline void
-LineIterator::init (const Neighborhood& neighborhood)
+LineIterator::init(const Neighborhood& neighborhood)
{
x_ = x_start_;
y_ = y_start_;
int delta_x = x_end_ - x_start_;
int delta_y = y_end_ - y_start_;
-
- int x_dir = ( (delta_x > 0) ? 1 : -1 ) ;
- int y_dir = ( (delta_y > 0) ? 1 : -1 ) ;
+
+ int x_dir = ((delta_x > 0) ? 1 : -1);
+ int y_dir = ((delta_y > 0) ? 1 : -1);
delta_x *= x_dir;
delta_y *= y_dir;
-
- if(delta_x >= delta_y)
- {
- if( neighborhood == Neighbor4 )
- {
+
+ if (delta_x >= delta_y) {
+ if (neighborhood == Neighbor4) {
error_max_ = delta_x - delta_y;
x_minus_ = 0;
y_minus_ = y_dir;
- x_plus_ = x_dir;
- y_plus_ = 0;
+ x_plus_ = x_dir;
+ y_plus_ = 0;
error_minus_ = -(delta_x * 2);
- error_plus_ = (delta_y * 2);
+ error_plus_ = (delta_y * 2);
}
- else
- {
+ else {
error_max_ = delta_x - (delta_y * 2);
x_minus_ = x_dir;
y_minus_ = y_dir;
- x_plus_ = x_dir;
- y_plus_ = 0;
-
+ x_plus_ = x_dir;
+ y_plus_ = 0;
+
error_minus_ = (delta_y - delta_x) * 2;
- error_plus_ = (delta_y * 2);
+ error_plus_ = (delta_y * 2);
}
}
- else
- {
- if( neighborhood == Neighbor4 )
- {
+ else {
+ if (neighborhood == Neighbor4) {
error_max_ = delta_y - delta_x;
x_minus_ = x_dir;
y_minus_ = 0;
- x_plus_ = 0;
- y_plus_ = y_dir;
+ x_plus_ = 0;
+ y_plus_ = y_dir;
error_minus_ = -(delta_y * 2);
- error_plus_ = (delta_x * 2);
+ error_plus_ = (delta_x * 2);
}
- else
- {
+ else {
error_max_ = delta_y - (delta_x * 2);
x_minus_ = x_dir;
y_minus_ = y_dir;
- x_plus_ = 0;
- y_plus_ = y_dir;
+ x_plus_ = 0;
+ y_plus_ = y_dir;
error_minus_ = (delta_x - delta_y) * 2;
- error_plus_ = (delta_x * 2);
+ error_plus_ = (delta_x * 2);
}
}
index_minus_ = x_minus_ + y_minus_ * width_;
- index_plus_ = x_plus_ + y_plus_ * width_;
+ index_plus_ = x_plus_ + y_plus_ * width_;
}
////////////////////////////////////////////////////////////////////////////////
inline void
-LineIterator::operator ++ ()
+LineIterator::operator++()
{
- if (error_ >= error_max_ )
- {
+ if (error_ >= error_max_) {
x_ += x_minus_;
y_ += y_minus_;
error_ += error_minus_;
index_ += index_minus_;
}
- else
- {
+ else {
x_ += x_plus_;
y_ += y_plus_;
error_ += error_plus_;
index_ += index_plus_;
- }
+ }
}
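+
+// Stepping sketch (illustrative): for a mostly horizontal line (dx >= dy) the "plus"
+// branch advances the column only, while the "minus" branch (taken when
+// error_ >= error_max_) also advances the row (Neighbor4: row only, Neighbor8:
+// diagonally). The accumulated error_ decides which branch keeps the iterator
+// closest to the ideal line, as in the classic Bresenham formulation.
+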
////////////////////////////////////////////////////////////////////////////////
inline unsigned
-LineIterator::getRowIndex () const
+LineIterator::getRowIndex() const
{
return y_;
}
////////////////////////////////////////////////////////////////////////////////
inline unsigned
-LineIterator::getColumnIndex () const
+LineIterator::getColumnIndex() const
{
return x_;
}
////////////////////////////////////////////////////////////////////////////////
inline bool
-LineIterator::isValid () const
+LineIterator::isValid() const
{
return (x_ != x_end_ || y_ != y_end_);
}
////////////////////////////////////////////////////////////////////////////////
inline void
-LineIterator::reset ()
+LineIterator::reset()
{
x_ = x_start_;
y_ = y_start_;
#pragma once
-#include <pcl/geometry/boost.h>
-#include <pcl/geometry/eigen.h>
#include <pcl/geometry/mesh_circulators.h>
-#include <pcl/geometry/mesh_indices.h>
#include <pcl/geometry/mesh_elements.h>
+#include <pcl/geometry/mesh_indices.h>
#include <pcl/geometry/mesh_traits.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <vector>
#include <type_traits>
+#include <vector>
+
////////////////////////////////////////////////////////////////////////////////
// Global variables used during testing
////////////////////////////////////////////////////////////////////////////////
#ifdef PCL_GEOMETRY_MESH_BASE_TEST_DELETE_FACE_MANIFOLD_2
-namespace pcl
-{
- namespace geometry
- {
- bool g_pcl_geometry_mesh_base_test_delete_face_manifold_2_success;
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+bool g_pcl_geometry_mesh_base_test_delete_face_manifold_2_success;
+} // End namespace geometry
} // End namespace pcl
#endif
// Forward declarations
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- template <class MeshT>
- class MeshIO;
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+template <class MeshT>
+class MeshIO;
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// MeshBase
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Base class for the half-edge mesh.
- * \tparam DerivedT Has to implement the method 'addFaceImpl'. Please have a look at pcl::geometry::TriangleMesh, pcl::geometry::QuadMesh and pcl::geometry::PolygonMesh.
- * \tparam MeshTraitsT Please have a look at pcl::geometry::DefaultMeshTraits.
- * \tparam MeshTagT Tag describing the type of the mesh, e.g. TriangleMeshTag, QuadMeshTag, PolygonMeshTag.
- * \author Martin Saelzle
- * \ingroup geometry
- * \todo Add documentation
- */
- template <class DerivedT, class MeshTraitsT, class MeshTagT>
- class MeshBase
- {
- public:
-
- using Self = MeshBase <DerivedT, MeshTraitsT, MeshTagT>;
- using Ptr = shared_ptr<Self>;
- using ConstPtr = shared_ptr<const Self>;
-
- using Derived = DerivedT;
-
- // These have to be defined in the traits class.
- using VertexData = typename MeshTraitsT::VertexData;
- using HalfEdgeData = typename MeshTraitsT::HalfEdgeData;
- using EdgeData = typename MeshTraitsT::EdgeData;
- using FaceData = typename MeshTraitsT::FaceData;
- using IsManifold = typename MeshTraitsT::IsManifold;
-
- // Check if the mesh traits are defined correctly.
- static_assert (std::is_convertible<IsManifold, bool>::value, "MeshTraitsT::IsManifold is not convertible to bool");
-
- using MeshTag = MeshTagT;
-
- // Data
- using HasVertexData = std::integral_constant <bool, !std::is_same <VertexData , pcl::geometry::NoData>::value>;
- using HasHalfEdgeData = std::integral_constant <bool, !std::is_same <HalfEdgeData, pcl::geometry::NoData>::value>;
- using HasEdgeData = std::integral_constant <bool, !std::is_same <EdgeData , pcl::geometry::NoData>::value>;
- using HasFaceData = std::integral_constant <bool, !std::is_same <FaceData , pcl::geometry::NoData>::value>;
-
- using VertexDataCloud = pcl::PointCloud<VertexData>;
- using HalfEdgeDataCloud = pcl::PointCloud<HalfEdgeData>;
- using EdgeDataCloud = pcl::PointCloud<EdgeData>;
- using FaceDataCloud = pcl::PointCloud<FaceData>;
-
- // Indices
- using VertexIndex = pcl::geometry::VertexIndex;
- using HalfEdgeIndex = pcl::geometry::HalfEdgeIndex;
- using EdgeIndex = pcl::geometry::EdgeIndex;
- using FaceIndex = pcl::geometry::FaceIndex;
-
- using VertexIndices = std::vector<VertexIndex>;
- using HalfEdgeIndices = std::vector<HalfEdgeIndex>;
- using EdgeIndices = std::vector<EdgeIndex>;
- using FaceIndices = std::vector<FaceIndex>;
-
- // Circulators
- using VertexAroundVertexCirculator = pcl::geometry::VertexAroundVertexCirculator<const Self>;
- using OutgoingHalfEdgeAroundVertexCirculator = pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator<const Self>;
- using IncomingHalfEdgeAroundVertexCirculator = pcl::geometry::IncomingHalfEdgeAroundVertexCirculator<const Self>;
- using FaceAroundVertexCirculator = pcl::geometry::FaceAroundVertexCirculator<const Self>;
- using VertexAroundFaceCirculator = pcl::geometry::VertexAroundFaceCirculator<const Self>;
- using InnerHalfEdgeAroundFaceCirculator = pcl::geometry::InnerHalfEdgeAroundFaceCirculator<const Self>;
- using OuterHalfEdgeAroundFaceCirculator = pcl::geometry::OuterHalfEdgeAroundFaceCirculator<const Self>;
- using FaceAroundFaceCirculator = pcl::geometry::FaceAroundFaceCirculator<const Self>;
-
- /** \brief Constructor. */
- MeshBase ()
- : vertex_data_cloud_ (),
- half_edge_data_cloud_ (),
- edge_data_cloud_ (),
- face_data_cloud_ ()
- {
- }
+namespace pcl {
+namespace geometry {
+/**
+ * \brief Base class for the half-edge mesh.
+ * \tparam DerivedT Has to implement the method 'addFaceImpl'. Please have a look at
+ * pcl::geometry::TriangleMesh, pcl::geometry::QuadMesh and pcl::geometry::PolygonMesh.
+ * \tparam MeshTraitsT Please have a look at pcl::geometry::DefaultMeshTraits.
+ * \tparam MeshTagT Tag describing the type of the mesh, e.g. TriangleMeshTag,
+ * QuadMeshTag, PolygonMeshTag.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ * \todo Add documentation
+ */
+template <class DerivedT, class MeshTraitsT, class MeshTagT>
+class MeshBase {
+public:
+ using Self = MeshBase<DerivedT, MeshTraitsT, MeshTagT>;
+ using Ptr = shared_ptr<Self>;
+ using ConstPtr = shared_ptr<const Self>;
+
+ using Derived = DerivedT;
+
+ // These have to be defined in the traits class.
+ using VertexData = typename MeshTraitsT::VertexData;
+ using HalfEdgeData = typename MeshTraitsT::HalfEdgeData;
+ using EdgeData = typename MeshTraitsT::EdgeData;
+ using FaceData = typename MeshTraitsT::FaceData;
+ using IsManifold = typename MeshTraitsT::IsManifold;
+
+ // Check if the mesh traits are defined correctly.
+ static_assert(std::is_convertible<IsManifold, bool>::value,
+ "MeshTraitsT::IsManifold is not convertible to bool");
+
+ using MeshTag = MeshTagT;
+
+ // Data
+ using HasVertexData =
+ std::integral_constant<bool,
+ !std::is_same<VertexData, pcl::geometry::NoData>::value>;
+ using HasHalfEdgeData =
+ std::integral_constant<bool,
+ !std::is_same<HalfEdgeData, pcl::geometry::NoData>::value>;
+ using HasEdgeData =
+ std::integral_constant<bool,
+ !std::is_same<EdgeData, pcl::geometry::NoData>::value>;
+ using HasFaceData =
+ std::integral_constant<bool,
+ !std::is_same<FaceData, pcl::geometry::NoData>::value>;
+
+ using VertexDataCloud = pcl::PointCloud<VertexData>;
+ using HalfEdgeDataCloud = pcl::PointCloud<HalfEdgeData>;
+ using EdgeDataCloud = pcl::PointCloud<EdgeData>;
+ using FaceDataCloud = pcl::PointCloud<FaceData>;
+
+ // Indices
+ using VertexIndex = pcl::geometry::VertexIndex;
+ using HalfEdgeIndex = pcl::geometry::HalfEdgeIndex;
+ using EdgeIndex = pcl::geometry::EdgeIndex;
+ using FaceIndex = pcl::geometry::FaceIndex;
+
+ using VertexIndices = std::vector<VertexIndex>;
+ using HalfEdgeIndices = std::vector<HalfEdgeIndex>;
+ using EdgeIndices = std::vector<EdgeIndex>;
+ using FaceIndices = std::vector<FaceIndex>;
+
+ // Circulators
+ using VertexAroundVertexCirculator =
+ pcl::geometry::VertexAroundVertexCirculator<const Self>;
+ using OutgoingHalfEdgeAroundVertexCirculator =
+ pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator<const Self>;
+ using IncomingHalfEdgeAroundVertexCirculator =
+ pcl::geometry::IncomingHalfEdgeAroundVertexCirculator<const Self>;
+ using FaceAroundVertexCirculator =
+ pcl::geometry::FaceAroundVertexCirculator<const Self>;
+ using VertexAroundFaceCirculator =
+ pcl::geometry::VertexAroundFaceCirculator<const Self>;
+ using InnerHalfEdgeAroundFaceCirculator =
+ pcl::geometry::InnerHalfEdgeAroundFaceCirculator<const Self>;
+ using OuterHalfEdgeAroundFaceCirculator =
+ pcl::geometry::OuterHalfEdgeAroundFaceCirculator<const Self>;
+ using FaceAroundFaceCirculator = pcl::geometry::FaceAroundFaceCirculator<const Self>;
+
+ /** \brief Constructor. */
+ MeshBase()
+ : vertex_data_cloud_()
+ , half_edge_data_cloud_()
+ , edge_data_cloud_()
+ , face_data_cloud_()
+ {}
+
+ ////////////////////////////////////////////////////////////////////////
+ // addVertex / addFace / deleteVertex / deleteEdge / deleteFace / cleanUp
+ ////////////////////////////////////////////////////////////////////////
+
+ /**
+ * \brief Add a vertex to the mesh.
+ * \param[in] vertex_data Data that is stored in the vertex. This is only added if the
+ * mesh has data associated with the vertices.
+ * \return Index to the new vertex.
+ */
+ inline VertexIndex
+ addVertex(const VertexData& vertex_data = VertexData())
+ {
+ vertices_.push_back(Vertex());
+ this->addData(vertex_data_cloud_, vertex_data, HasVertexData());
+ return (VertexIndex(static_cast<int>(this->sizeVertices() - 1)));
+ }
+
+ /**
+ * \brief Add a face to the mesh. Data is only added if it is associated with the
+ * elements. The last vertex is connected with the first one.
+ * \param[in] vertices Indices to the vertices of the new face.
+ * \param[in] face_data Data that is set for the face.
+ * \param[in] half_edge_data Data that is set for all added half-edges.
+ * \param[in] edge_data Data that is set for all added edges.
+ * \return Index to the new face. Failure is signaled by returning an invalid face
+ * index.
+ * \warning The vertices must be valid and unique (each vertex may be contained
+ * only once). Not complying with this requirement results in undefined behavior!
+ */
+ inline FaceIndex
+ addFace(const VertexIndices& vertices,
+ const FaceData& face_data = FaceData(),
+ const EdgeData& edge_data = EdgeData(),
+ const HalfEdgeData& half_edge_data = HalfEdgeData())
+ {
+ // NOTE: The derived class has to implement addFaceImpl. If needed it can use the
+ // general method addFaceImplBase.
+ return (static_cast<Derived*>(this)->addFaceImpl(
+ vertices, face_data, edge_data, half_edge_data));
+ }
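+
+  // Usage sketch (illustrative; assumes a concrete mesh type derived from this base,
+  // e.g. pcl::geometry::TriangleMesh over DefaultMeshTraits<pcl::PointXYZ> from the
+  // other geometry headers; all names below are placeholders):
+  //
+  //   using Mesh = pcl::geometry::TriangleMesh<
+  //       pcl::geometry::DefaultMeshTraits<pcl::PointXYZ>>;
+  //   Mesh mesh;
+  //   Mesh::VertexIndices vertices;
+  //   vertices.push_back (mesh.addVertex (pcl::PointXYZ (0.f, 0.f, 0.f)));
+  //   vertices.push_back (mesh.addVertex (pcl::PointXYZ (1.f, 0.f, 0.f)));
+  //   vertices.push_back (mesh.addVertex (pcl::PointXYZ (0.f, 1.f, 0.f)));
+  //   const Mesh::FaceIndex face = mesh.addFace (vertices); // invalid index on failure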
+
+ /**
+ * \brief Mark the given vertex and all connected half-edges and faces as deleted.
+ * \note Call cleanUp () to finally delete all mesh-elements.
+ */
+ void
+ deleteVertex(const VertexIndex& idx_vertex)
+ {
+ assert(this->isValid(idx_vertex));
+ if (this->isDeleted(idx_vertex))
+ return;
+
+ delete_faces_vertex_.clear();
+ FaceAroundVertexCirculator circ = this->getFaceAroundVertexCirculator(idx_vertex);
+ const FaceAroundVertexCirculator circ_end = circ;
+ do {
+ if (circ.getTargetIndex().isValid()) // Check for boundary.
+ {
+ delete_faces_vertex_.push_back(circ.getTargetIndex());
+ }
+ } while (++circ != circ_end);
+
+ for (FaceIndices::const_iterator it = delete_faces_vertex_.begin();
+ it != delete_faces_vertex_.end();
+ ++it) {
+ this->deleteFace(*it);
+ }
+ }
+
+ /**
+ * \brief Mark the given half-edge, the opposite half-edge and the associated faces
+ * as deleted.
+ * \note Call cleanUp () to finally delete all mesh-elements.
+ */
+ void
+ deleteEdge(const HalfEdgeIndex& idx_he)
+ {
+ assert(this->isValid(idx_he));
+ if (this->isDeleted(idx_he))
+ return;
+
+ HalfEdgeIndex opposite = this->getOppositeHalfEdgeIndex(idx_he);
+
+ if (this->isBoundary(idx_he))
+ this->markDeleted(idx_he);
+ else
+ this->deleteFace(this->getFaceIndex(idx_he));
+ if (this->isBoundary(opposite))
+ this->markDeleted(opposite);
+ else
+ this->deleteFace(this->getFaceIndex(opposite));
+ }
+
+ /**
+ * \brief Mark the given edge (both half-edges) and the associated faces as deleted.
+ * \note Call cleanUp () to finally delete all mesh-elements.
+ */
+ inline void
+ deleteEdge(const EdgeIndex& idx_edge)
+ {
+ assert(this->isValid(idx_edge));
+ this->deleteEdge(pcl::geometry::toHalfEdgeIndex(idx_edge));
+ assert(this->isDeleted(
+ pcl::geometry::toHalfEdgeIndex(idx_edge, false))); // Bug in this class!
+ }
+
+ /**
+ * \brief Mark the given face as deleted. More faces are deleted if the manifold mesh
+ * would become non-manifold.
+ * \note Call cleanUp () to finally delete all mesh-elements.
+ */
+ inline void
+ deleteFace(const FaceIndex& idx_face)
+ {
+ assert(this->isValid(idx_face));
+ if (this->isDeleted(idx_face))
+ return;
+
+ this->deleteFace(idx_face, IsManifold());
+ }
+
+ /**
+ * \brief Removes all mesh elements and data that are marked as deleted.
+ * \note This removes all isolated vertices as well.
+ */
+ void
+ cleanUp()
+ {
+ // Copy the non-deleted mesh elements and store the index to their new position
+ const VertexIndices new_vertex_indices =
+ this->remove<Vertices, VertexDataCloud, VertexIndices, HasVertexData>(
+ vertices_, vertex_data_cloud_);
+ const HalfEdgeIndices new_half_edge_indices =
+ this->remove<HalfEdges, HalfEdgeDataCloud, HalfEdgeIndices, HasHalfEdgeData>(
+ half_edges_, half_edge_data_cloud_);
+ const FaceIndices new_face_indices =
+ this->remove<Faces, FaceDataCloud, FaceIndices, HasFaceData>(faces_,
+ face_data_cloud_);
+
+ // Remove deleted edge data
+ if (HasEdgeData::value) {
+ auto it_ed_old = edge_data_cloud_.begin();
+ auto it_ed_new = edge_data_cloud_.begin();
+
+ for (auto it_ind = new_half_edge_indices.cbegin(),
+ it_ind_end = new_half_edge_indices.cend();
+ it_ind != it_ind_end;
+ it_ind += 2, ++it_ed_old) {
+ if (it_ind->isValid()) {
+ *it_ed_new++ = *it_ed_old;
+ }
+ }
+ edge_data_cloud_.resize(this->sizeEdges());
+ }
+
+ // Adjust the indices
+ for (VertexIterator it = vertices_.begin(); it != vertices_.end(); ++it) {
+ if (it->idx_outgoing_half_edge_.isValid()) {
+ it->idx_outgoing_half_edge_ =
+ new_half_edge_indices[it->idx_outgoing_half_edge_.get()];
+ }
+ }
+
+ for (HalfEdgeIterator it = half_edges_.begin(); it != half_edges_.end(); ++it) {
+ it->idx_terminating_vertex_ =
+ new_vertex_indices[it->idx_terminating_vertex_.get()];
+ it->idx_next_half_edge_ = new_half_edge_indices[it->idx_next_half_edge_.get()];
+ it->idx_prev_half_edge_ = new_half_edge_indices[it->idx_prev_half_edge_.get()];
+ if (it->idx_face_.isValid()) {
+ it->idx_face_ = new_face_indices[it->idx_face_.get()];
+ }
+ }
+
+ for (FaceIterator it = faces_.begin(); it != faces_.end(); ++it) {
+ it->idx_inner_half_edge_ = new_half_edge_indices[it->idx_inner_half_edge_.get()];
+ }
+ }
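+
+  // Deletion sketch (illustrative; mesh and face are placeholders): the delete* ()
+  // methods only mark elements, while cleanUp () performs the actual removal and
+  // compacts the storage, so indices held from before the call may afterwards refer
+  // to different (or no) elements.
+  //
+  //   mesh.deleteFace (face); // may mark further faces if the mesh must stay manifold
+  //   mesh.cleanUp ();        // removes everything marked as deleted, incl. isolated vertices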
+
+ ////////////////////////////////////////////////////////////////////////
+ // Vertex connectivity
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Get the outgoing half-edge index to a given vertex. */
+ inline HalfEdgeIndex
+ getOutgoingHalfEdgeIndex(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (this->getVertex(idx_vertex).idx_outgoing_half_edge_);
+ }
- ////////////////////////////////////////////////////////////////////////
- // addVertex / addFace / deleteVertex / deleteEdge / deleteFace / cleanUp
- ////////////////////////////////////////////////////////////////////////
+ /** \brief Get the incoming half-edge index to a given vertex. */
+ inline HalfEdgeIndex
+ getIncomingHalfEdgeIndex(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (this->getOppositeHalfEdgeIndex(this->getOutgoingHalfEdgeIndex(idx_vertex)));
+ }
- /** \brief Add a vertex to the mesh.
- * \param[in] vertex_data Data that is stored in the vertex. This is only added if the mesh has data associated with the vertices.
- * \return Index to the new vertex.
- */
- inline VertexIndex
- addVertex (const VertexData& vertex_data=VertexData ())
- {
- vertices_.push_back (Vertex ());
- this->addData (vertex_data_cloud_, vertex_data, HasVertexData ());
- return (VertexIndex (static_cast <int> (this->sizeVertices () - 1)));
- }
+ ////////////////////////////////////////////////////////////////////////
+ // Half-edge connectivity
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Add a face to the mesh. Data is only added if it is associated with the elements. The last vertex is connected with the first one.
- * \param[in] vertices Indices to the vertices of the new face.
- * \param[in] face_data Data that is set for the face.
- * \param[in] half_edge_data Data that is set for all added half-edges.
- * \param[in] edge_data Data that is set for all added edges.
- * \return Index to the new face. Failure is signaled by returning an invalid face index.
- * \warning The vertices must be valid and unique (each vertex may be contained only once). Not complying with this requirement results in undefined behavior!
- */
- inline FaceIndex
- addFace (const VertexIndices& vertices,
- const FaceData& face_data = FaceData (),
- const EdgeData& edge_data = EdgeData (),
- const HalfEdgeData& half_edge_data = HalfEdgeData ())
- {
- // NOTE: The derived class has to implement addFaceImpl. If needed it can use the general method addFaceImplBase.
- return (static_cast <Derived*> (this)->addFaceImpl (vertices, face_data, edge_data, half_edge_data));
- }
+ /** \brief Get the terminating vertex index to a given half-edge. */
+ inline VertexIndex
+ getTerminatingVertexIndex(const HalfEdgeIndex& idx_half_edge) const
+ {
+ assert(this->isValid(idx_half_edge));
+ return (this->getHalfEdge(idx_half_edge).idx_terminating_vertex_);
+ }
- /** \brief Mark the given vertex and all connected half-edges and faces as deleted.
- * \note Call cleanUp () to finally delete all mesh-elements.
- */
- void
- deleteVertex (const VertexIndex& idx_vertex)
- {
- assert (this->isValid (idx_vertex));
- if (this->isDeleted (idx_vertex)) return;
-
- delete_faces_vertex_.clear ();
- FaceAroundVertexCirculator circ = this->getFaceAroundVertexCirculator (idx_vertex);
- const FaceAroundVertexCirculator circ_end = circ;
- do
- {
- if (circ.getTargetIndex ().isValid ()) // Check for boundary.
- {
- delete_faces_vertex_.push_back (circ.getTargetIndex ());
- }
- } while (++circ!=circ_end);
-
- for (FaceIndices::const_iterator it = delete_faces_vertex_.begin (); it!=delete_faces_vertex_.end (); ++it)
- {
- this->deleteFace (*it);
- }
- }
+ /** \brief Get the originating vertex index to a given half-edge. */
+ inline VertexIndex
+ getOriginatingVertexIndex(const HalfEdgeIndex& idx_half_edge) const
+ {
+ assert(this->isValid(idx_half_edge));
+ return (
+ this->getTerminatingVertexIndex(this->getOppositeHalfEdgeIndex(idx_half_edge)));
+ }
+
+ /** \brief Get the opposite half-edge index to a given half-edge. */
+ inline HalfEdgeIndex
+ getOppositeHalfEdgeIndex(const HalfEdgeIndex& idx_half_edge) const
+ {
+ assert(this->isValid(idx_half_edge));
+ // Check if the index is even or odd and return the other index.
+ return (HalfEdgeIndex(idx_half_edge.get() & 1 ? idx_half_edge.get() - 1
+ : idx_half_edge.get() + 1));
+ }
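+
+  // Layout note (illustrative): the two half-edges of an edge are stored at
+  // consecutive indices, so opposite pairs are (0,1), (2,3), ... For example:
+  //
+  //   mesh.getOppositeHalfEdgeIndex (HalfEdgeIndex (5)); // -> HalfEdgeIndex (4)
+  //   mesh.getOppositeHalfEdgeIndex (HalfEdgeIndex (4)); // -> HalfEdgeIndex (5)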
+
+ /** \brief Get the next half-edge index to a given half-edge. */
+ inline HalfEdgeIndex
+ getNextHalfEdgeIndex(const HalfEdgeIndex& idx_half_edge) const
+ {
+ assert(this->isValid(idx_half_edge));
+ return (this->getHalfEdge(idx_half_edge).idx_next_half_edge_);
+ }
- /** \brief Mark the given half-edge, the opposite half-edge and the associated faces as deleted.
- * \note Call cleanUp () to finally delete all mesh-elements.
- */
- void
- deleteEdge (const HalfEdgeIndex& idx_he)
- {
- assert (this->isValid (idx_he));
- if (this->isDeleted (idx_he)) return;
+ /** \brief Get the previous half-edge index to a given half-edge. */
+ inline HalfEdgeIndex
+ getPrevHalfEdgeIndex(const HalfEdgeIndex& idx_half_edge) const
+ {
+ assert(this->isValid(idx_half_edge));
+ return (this->getHalfEdge(idx_half_edge).idx_prev_half_edge_);
+ }
- HalfEdgeIndex opposite = this->getOppositeHalfEdgeIndex (idx_he);
+ /** \brief Get the face index to a given half-edge. */
+ inline FaceIndex
+ getFaceIndex(const HalfEdgeIndex& idx_half_edge) const
+ {
+ assert(this->isValid(idx_half_edge));
+ return (this->getHalfEdge(idx_half_edge).idx_face_);
+ }
- if (this->isBoundary (idx_he)) this->markDeleted (idx_he);
- else this->deleteFace (this->getFaceIndex (idx_he));
- if (this->isBoundary (opposite)) this->markDeleted (opposite);
- else this->deleteFace (this->getFaceIndex (opposite));
- }
+  /** \brief Get the index of the face lying on the opposite side of a given half-edge. */
+ inline FaceIndex
+ getOppositeFaceIndex(const HalfEdgeIndex& idx_half_edge) const
+ {
+ assert(this->isValid(idx_half_edge));
+ return (this->getFaceIndex(this->getOppositeHalfEdgeIndex(idx_half_edge)));
+ }
- /** \brief Mark the given edge (both half-edges) and the associated faces as deleted.
- * \note Call cleanUp () to finally delete all mesh-elements.
- */
- inline void
- deleteEdge (const EdgeIndex& idx_edge)
- {
- assert (this->isValid (idx_edge));
- this->deleteEdge (pcl::geometry::toHalfEdgeIndex (idx_edge));
- assert (this->isDeleted (pcl::geometry::toHalfEdgeIndex (idx_edge, false))); // Bug in this class!
- }
+ ////////////////////////////////////////////////////////////////////////
+ // Face connectivity
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Mark the given face as deleted. More faces are deleted if the manifold mesh would become non-manifold.
- * \note Call cleanUp () to finally delete all mesh-elements.
- */
- inline void
- deleteFace (const FaceIndex& idx_face)
- {
- assert (this->isValid (idx_face));
- if (this->isDeleted (idx_face)) return;
+ /** \brief Get the inner half-edge index to a given face. */
+ inline HalfEdgeIndex
+ getInnerHalfEdgeIndex(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (this->getFace(idx_face).idx_inner_half_edge_);
+ }
- this->deleteFace (idx_face, IsManifold ());
- }
+  /** \brief Get the outer half-edge index to a given face. */
+ inline HalfEdgeIndex
+ getOuterHalfEdgeIndex(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (this->getOppositeHalfEdgeIndex(this->getInnerHalfEdgeIndex(idx_face)));
+ }
- /** \brief Removes all mesh elements and data that are marked as deleted.
- * \note This removes all isolated vertices as well.
- */
- void
- cleanUp ()
- {
- // Copy the non-deleted mesh elements and store the index to their new position
- const VertexIndices new_vertex_indices =
- this->remove <Vertices, VertexDataCloud, VertexIndices, HasVertexData>
- (vertices_, vertex_data_cloud_);
- const HalfEdgeIndices new_half_edge_indices =
- this->remove <HalfEdges, HalfEdgeDataCloud, HalfEdgeIndices, HasHalfEdgeData>
- (half_edges_, half_edge_data_cloud_);
- const FaceIndices new_face_indices =
- this->remove <Faces, FaceDataCloud, FaceIndices, HasFaceData>
- (faces_, face_data_cloud_);
-
- // Remove deleted edge data
- if (HasEdgeData::value)
- {
- auto it_ed_old = edge_data_cloud_.begin ();
- auto it_ed_new = edge_data_cloud_.begin ();
-
- for (auto it_ind = new_half_edge_indices.cbegin (), it_ind_end = new_half_edge_indices.cend (); it_ind!=it_ind_end; it_ind+=2, ++it_ed_old)
- {
- if (it_ind->isValid ())
- {
- *it_ed_new++ = *it_ed_old;
- }
- }
- edge_data_cloud_.resize (this->sizeEdges ());
- }
-
- // Adjust the indices
- for (VertexIterator it = vertices_.begin (); it!=vertices_.end (); ++it)
- {
- if (it->idx_outgoing_half_edge_.isValid ())
- {
- it->idx_outgoing_half_edge_ = new_half_edge_indices [it->idx_outgoing_half_edge_.get ()];
- }
- }
-
- for (HalfEdgeIterator it = half_edges_.begin (); it!=half_edges_.end (); ++it)
- {
- it->idx_terminating_vertex_ = new_vertex_indices [it->idx_terminating_vertex_.get ()];
- it->idx_next_half_edge_ = new_half_edge_indices [it->idx_next_half_edge_.get ()];
- it->idx_prev_half_edge_ = new_half_edge_indices [it->idx_prev_half_edge_.get ()];
- if (it->idx_face_.isValid ())
- {
- it->idx_face_ = new_face_indices [it->idx_face_.get ()];
- }
- }
-
- for (FaceIterator it = faces_.begin (); it!=faces_.end (); ++it)
- {
- it->idx_inner_half_edge_ = new_half_edge_indices [it->idx_inner_half_edge_.get ()];
- }
- }
+ ////////////////////////////////////////////////////////////////////////
+ // Circulators
+ ////////////////////////////////////////////////////////////////////////
- ////////////////////////////////////////////////////////////////////////
- // Vertex connectivity
- ////////////////////////////////////////////////////////////////////////
+ /** \see pcl::geometry::VertexAroundVertexCirculator */
+ inline VertexAroundVertexCirculator
+ getVertexAroundVertexCirculator(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (VertexAroundVertexCirculator(idx_vertex, this));
+ }
- /** \brief Get the outgoing half-edge index to a given vertex. */
- inline HalfEdgeIndex
- getOutgoingHalfEdgeIndex (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (this->getVertex (idx_vertex).idx_outgoing_half_edge_);
- }
+ /** \see pcl::geometry::VertexAroundVertexCirculator */
+ inline VertexAroundVertexCirculator
+ getVertexAroundVertexCirculator(const HalfEdgeIndex& idx_outgoing_half_edge) const
+ {
+ assert(this->isValid(idx_outgoing_half_edge));
+ return (VertexAroundVertexCirculator(idx_outgoing_half_edge, this));
+ }
- /** \brief Get the incoming half-edge index to a given vertex. */
- inline HalfEdgeIndex
- getIncomingHalfEdgeIndex (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (this->getOppositeHalfEdgeIndex (this->getOutgoingHalfEdgeIndex (idx_vertex)));
- }
+ /** \see pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator */
+ inline OutgoingHalfEdgeAroundVertexCirculator
+ getOutgoingHalfEdgeAroundVertexCirculator(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (OutgoingHalfEdgeAroundVertexCirculator(idx_vertex, this));
+ }
+
+ /** \see pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator */
+ inline OutgoingHalfEdgeAroundVertexCirculator
+ getOutgoingHalfEdgeAroundVertexCirculator(
+ const HalfEdgeIndex& idx_outgoing_half_edge) const
+ {
+ assert(this->isValid(idx_outgoing_half_edge));
+ return (OutgoingHalfEdgeAroundVertexCirculator(idx_outgoing_half_edge, this));
+ }
- ////////////////////////////////////////////////////////////////////////
- // Half-edge connectivity
- ////////////////////////////////////////////////////////////////////////
+ /** \see pcl::geometry::IncomingHalfEdgeAroundVertexCirculator */
+ inline IncomingHalfEdgeAroundVertexCirculator
+ getIncomingHalfEdgeAroundVertexCirculator(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (IncomingHalfEdgeAroundVertexCirculator(idx_vertex, this));
+ }
+
+ /** \see pcl::geometry::IncomingHalfEdgeAroundVertexCirculator */
+ inline IncomingHalfEdgeAroundVertexCirculator
+ getIncomingHalfEdgeAroundVertexCirculator(
+ const HalfEdgeIndex& idx_incoming_half_edge) const
+ {
+ assert(this->isValid(idx_incoming_half_edge));
+ return (IncomingHalfEdgeAroundVertexCirculator(idx_incoming_half_edge, this));
+ }
- /** \brief Get the terminating vertex index to a given half-edge. */
- inline VertexIndex
- getTerminatingVertexIndex (const HalfEdgeIndex& idx_half_edge) const
- {
- assert (this->isValid (idx_half_edge));
- return (this->getHalfEdge (idx_half_edge).idx_terminating_vertex_);
- }
+ /** \see pcl::geometry::FaceAroundVertexCirculator */
+ inline FaceAroundVertexCirculator
+ getFaceAroundVertexCirculator(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (FaceAroundVertexCirculator(idx_vertex, this));
+ }
- /** \brief Get the originating vertex index to a given half-edge. */
- inline VertexIndex
- getOriginatingVertexIndex (const HalfEdgeIndex& idx_half_edge) const
- {
- assert (this->isValid (idx_half_edge));
- return (this->getTerminatingVertexIndex (this->getOppositeHalfEdgeIndex (idx_half_edge)));
- }
+ /** \see pcl::geometry::FaceAroundVertexCirculator */
+ inline FaceAroundVertexCirculator
+ getFaceAroundVertexCirculator(const HalfEdgeIndex& idx_outgoing_half_edge) const
+ {
+ assert(this->isValid(idx_outgoing_half_edge));
+ return (FaceAroundVertexCirculator(idx_outgoing_half_edge, this));
+ }
- /** \brief Get the opposite half-edge index to a given half-edge. */
- inline HalfEdgeIndex
- getOppositeHalfEdgeIndex (const HalfEdgeIndex& idx_half_edge) const
- {
- assert (this->isValid (idx_half_edge));
- // Check if the index is even or odd and return the other index.
- return (HalfEdgeIndex (idx_half_edge.get () & 1 ? idx_half_edge.get () - 1 : idx_half_edge.get () + 1));
- }
+ /** \see pcl::geometry::VertexAroundFaceCirculator */
+ inline VertexAroundFaceCirculator
+ getVertexAroundFaceCirculator(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (VertexAroundFaceCirculator(idx_face, this));
+ }
- /** \brief Get the next half-edge index to a given half-edge. */
- inline HalfEdgeIndex
- getNextHalfEdgeIndex (const HalfEdgeIndex& idx_half_edge) const
- {
- assert (this->isValid (idx_half_edge));
- return (this->getHalfEdge (idx_half_edge).idx_next_half_edge_);
- }
+ /** \see pcl::geometry::VertexAroundFaceCirculator */
+ inline VertexAroundFaceCirculator
+ getVertexAroundFaceCirculator(const HalfEdgeIndex& idx_inner_half_edge) const
+ {
+ assert(this->isValid(idx_inner_half_edge));
+ return (VertexAroundFaceCirculator(idx_inner_half_edge, this));
+ }
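+
+  // Circulator sketch (illustrative; mesh, face and face_vertices are placeholders):
+  // collect the vertices of a face. As in deleteVertex () above, circulators are
+  // compared against their start position.
+  //
+  //   VertexAroundFaceCirculator circ = mesh.getVertexAroundFaceCirculator (face);
+  //   const VertexAroundFaceCirculator end = circ;
+  //   VertexIndices face_vertices;
+  //   do
+  //     face_vertices.push_back (circ.getTargetIndex ());
+  //   while (++circ != end);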
- /** \brief Get the previous half-edge index to a given half-edge. */
- inline HalfEdgeIndex
- getPrevHalfEdgeIndex (const HalfEdgeIndex& idx_half_edge) const
- {
- assert (this->isValid (idx_half_edge));
- return (this->getHalfEdge (idx_half_edge).idx_prev_half_edge_);
- }
+ /** \see pcl::geometry::InnerHalfEdgeAroundFaceCirculator */
+ inline InnerHalfEdgeAroundFaceCirculator
+ getInnerHalfEdgeAroundFaceCirculator(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (InnerHalfEdgeAroundFaceCirculator(idx_face, this));
+ }
- /** \brief Get the face index to a given half-edge. */
- inline FaceIndex
- getFaceIndex (const HalfEdgeIndex& idx_half_edge) const
- {
- assert (this->isValid (idx_half_edge));
- return (this->getHalfEdge (idx_half_edge).idx_face_);
- }
+ /** \see pcl::geometry::InnerHalfEdgeAroundFaceCirculator */
+ inline InnerHalfEdgeAroundFaceCirculator
+ getInnerHalfEdgeAroundFaceCirculator(const HalfEdgeIndex& idx_inner_half_edge) const
+ {
+ assert(this->isValid(idx_inner_half_edge));
+ return (InnerHalfEdgeAroundFaceCirculator(idx_inner_half_edge, this));
+ }
- /** \brief Get the face index to a given half-edge. */
- inline FaceIndex
- getOppositeFaceIndex (const HalfEdgeIndex& idx_half_edge) const
- {
- assert (this->isValid (idx_half_edge));
- return (this->getFaceIndex (this->getOppositeHalfEdgeIndex (idx_half_edge)));
- }
+ /** \see pcl::geometry::OuterHalfEdgeAroundFaceCirculator */
+ inline OuterHalfEdgeAroundFaceCirculator
+ getOuterHalfEdgeAroundFaceCirculator(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (OuterHalfEdgeAroundFaceCirculator(idx_face, this));
+ }
- ////////////////////////////////////////////////////////////////////////
- // Face connectivity
- ////////////////////////////////////////////////////////////////////////
+ /** \see pcl::geometry::OuterHalfEdgeAroundFaceCirculator */
+ inline OuterHalfEdgeAroundFaceCirculator
+ getOuterHalfEdgeAroundFaceCirculator(const HalfEdgeIndex& idx_inner_half_edge) const
+ {
+ assert(this->isValid(idx_inner_half_edge));
+ return (OuterHalfEdgeAroundFaceCirculator(idx_inner_half_edge, this));
+ }
- /** \brief Get the inner half-edge index to a given face. */
- inline HalfEdgeIndex
- getInnerHalfEdgeIndex (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (this->getFace (idx_face).idx_inner_half_edge_);
- }
+ /** \see pcl::geometry::FaceAroundFaceCirculator */
+ inline FaceAroundFaceCirculator
+ getFaceAroundFaceCirculator(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (FaceAroundFaceCirculator(idx_face, this));
+ }
- /** \brief Get the outer half-edge inex to a given face. */
- inline HalfEdgeIndex
- getOuterHalfEdgeIndex (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (this->getOppositeHalfEdgeIndex (this->getInnerHalfEdgeIndex (idx_face)));
- }
+ /** \see pcl::geometry::FaceAroundFaceCirculator */
+ inline FaceAroundFaceCirculator
+ getFaceAroundFaceCirculator(const HalfEdgeIndex& idx_inner_half_edge) const
+ {
+ assert(this->isValid(idx_inner_half_edge));
+ return (FaceAroundFaceCirculator(idx_inner_half_edge, this));
+ }
- ////////////////////////////////////////////////////////////////////////
- // Circulators
- ////////////////////////////////////////////////////////////////////////
+ //////////////////////////////////////////////////////////////////////////
+ // isEqualTopology
+ //////////////////////////////////////////////////////////////////////////
- /** \see pcl::geometry::VertexAroundVertexCirculator */
- inline VertexAroundVertexCirculator
- getVertexAroundVertexCirculator (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (VertexAroundVertexCirculator (idx_vertex, this));
- }
+ /** \brief Check if the other mesh has the same topology as this mesh. */
+ bool
+ isEqualTopology(const Self& other) const
+ {
+ if (this->sizeVertices() != other.sizeVertices())
+ return (false);
+ if (this->sizeHalfEdges() != other.sizeHalfEdges())
+ return (false);
+ if (this->sizeFaces() != other.sizeFaces())
+ return (false);
+
+ for (std::size_t i = 0; i < this->sizeVertices(); ++i) {
+ if (this->getOutgoingHalfEdgeIndex(VertexIndex(i)) !=
+ other.getOutgoingHalfEdgeIndex(VertexIndex(i)))
+ return (false);
+ }
+
+ for (std::size_t i = 0; i < this->sizeHalfEdges(); ++i) {
+ if (this->getTerminatingVertexIndex(HalfEdgeIndex(i)) !=
+ other.getTerminatingVertexIndex(HalfEdgeIndex(i)))
+ return (false);
+
+ if (this->getNextHalfEdgeIndex(HalfEdgeIndex(i)) !=
+ other.getNextHalfEdgeIndex(HalfEdgeIndex(i)))
+ return (false);
+
+ if (this->getPrevHalfEdgeIndex(HalfEdgeIndex(i)) !=
+ other.getPrevHalfEdgeIndex(HalfEdgeIndex(i)))
+ return (false);
+
+ if (this->getFaceIndex(HalfEdgeIndex(i)) != other.getFaceIndex(HalfEdgeIndex(i)))
+ return (false);
+ }
+
+ for (std::size_t i = 0; i < this->sizeFaces(); ++i) {
+ if (this->getInnerHalfEdgeIndex(FaceIndex(i)) !=
+ other.getInnerHalfEdgeIndex(FaceIndex(i)))
+ return (false);
+ }
+
+ return (true);
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // isValid
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Check if the given vertex index is a valid index into the mesh. */
+ inline bool
+ isValid(const VertexIndex& idx_vertex) const
+ {
+ return (idx_vertex >= VertexIndex(0) &&
+ idx_vertex < VertexIndex(int(vertices_.size())));
+ }
- /** \see pcl::geometry::VertexAroundVertexCirculator */
- inline VertexAroundVertexCirculator
- getVertexAroundVertexCirculator (const HalfEdgeIndex& idx_outgoing_half_edge) const
- {
- assert (this->isValid (idx_outgoing_half_edge));
- return (VertexAroundVertexCirculator (idx_outgoing_half_edge, this));
- }
+ /** \brief Check if the given half-edge index is a valid index into the mesh. */
+ inline bool
+ isValid(const HalfEdgeIndex& idx_he) const
+ {
+ return (idx_he >= HalfEdgeIndex(0) && idx_he < HalfEdgeIndex(half_edges_.size()));
+ }
- /** \see pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator */
- inline OutgoingHalfEdgeAroundVertexCirculator
- getOutgoingHalfEdgeAroundVertexCirculator (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (OutgoingHalfEdgeAroundVertexCirculator (idx_vertex, this));
- }
+ /** \brief Check if the given edge index is a valid index into the mesh. */
+ inline bool
+ isValid(const EdgeIndex& idx_edge) const
+ {
+ return (idx_edge >= EdgeIndex(0) && idx_edge < EdgeIndex(half_edges_.size() / 2));
+ }
- /** \see pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator */
- inline OutgoingHalfEdgeAroundVertexCirculator
- getOutgoingHalfEdgeAroundVertexCirculator (const HalfEdgeIndex& idx_outgoing_half_edge) const
- {
- assert (this->isValid (idx_outgoing_half_edge));
- return (OutgoingHalfEdgeAroundVertexCirculator (idx_outgoing_half_edge, this));
- }
+ /** \brief Check if the given face index is a valid index into the mesh. */
+ inline bool
+ isValid(const FaceIndex& idx_face) const
+ {
+ return (idx_face >= FaceIndex(0) && idx_face < FaceIndex(faces_.size()));
+ }
- /** \see pcl::geometry::IncomingHalfEdgeAroundVertexCirculator */
- inline IncomingHalfEdgeAroundVertexCirculator
- getIncomingHalfEdgeAroundVertexCirculator (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (IncomingHalfEdgeAroundVertexCirculator (idx_vertex, this));
- }
+ ////////////////////////////////////////////////////////////////////////
+ // isDeleted
+ ////////////////////////////////////////////////////////////////////////
- /** \see pcl::geometry::IncomingHalfEdgeAroundVertexCirculator */
- inline IncomingHalfEdgeAroundVertexCirculator
- getIncomingHalfEdgeAroundVertexCirculator (const HalfEdgeIndex& idx_incoming_half_edge) const
- {
- assert (this->isValid (idx_incoming_half_edge));
- return (IncomingHalfEdgeAroundVertexCirculator (idx_incoming_half_edge, this));
- }
+ /** \brief Check if the given vertex is marked as deleted. */
+ inline bool
+ isDeleted(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (!this->getOutgoingHalfEdgeIndex(idx_vertex).isValid());
+ }
- /** \see pcl::geometry::FaceAroundVertexCirculator */
- inline FaceAroundVertexCirculator
- getFaceAroundVertexCirculator (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (FaceAroundVertexCirculator (idx_vertex, this));
- }
+ /** \brief Check if the given half-edge is marked as deleted. */
+ inline bool
+ isDeleted(const HalfEdgeIndex& idx_he) const
+ {
+ assert(this->isValid(idx_he));
+ return (!this->getTerminatingVertexIndex(idx_he).isValid());
+ }
+
+  /** \brief Check if the given edge (either of the two half-edges) is marked as deleted.
+ */
+ inline bool
+ isDeleted(const EdgeIndex& idx_edge) const
+ {
+ assert(this->isValid(idx_edge));
+ return (this->isDeleted(pcl::geometry::toHalfEdgeIndex(idx_edge, true)) ||
+ this->isDeleted(pcl::geometry::toHalfEdgeIndex(idx_edge, false)));
+ }
+
+ /** \brief Check if the given face is marked as deleted. */
+ inline bool
+ isDeleted(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (!this->getInnerHalfEdgeIndex(idx_face).isValid());
+ }
- /** \see pcl::geometry::FaceAroundVertexCirculator */
- inline FaceAroundVertexCirculator
- getFaceAroundVertexCirculator (const HalfEdgeIndex& idx_outgoing_half_edge) const
- {
- assert (this->isValid (idx_outgoing_half_edge));
- return (FaceAroundVertexCirculator (idx_outgoing_half_edge, this));
- }
+ ////////////////////////////////////////////////////////////////////////
+ // isIsolated
+ ////////////////////////////////////////////////////////////////////////
- /** \see pcl::geometry::VertexAroundFaceCirculator */
- inline VertexAroundFaceCirculator
- getVertexAroundFaceCirculator (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (VertexAroundFaceCirculator (idx_face, this));
- }
+ /** \brief Check if the given vertex is isolated (not connected to other elements). */
+ inline bool
+ isIsolated(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (!this->getOutgoingHalfEdgeIndex(idx_vertex).isValid());
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // isBoundary
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Check if the given vertex lies on the boundary. Isolated vertices are
+ * considered to be on the boundary. */
+ inline bool
+ isBoundary(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ if (this->isIsolated(idx_vertex))
+ return (true);
+ return (this->isBoundary(this->getOutgoingHalfEdgeIndex(idx_vertex)));
+ }
+
+  /** \brief Check if the given half-edge lies on the boundary. */
+ inline bool
+ isBoundary(const HalfEdgeIndex& idx_he) const
+ {
+ assert(this->isValid(idx_he));
+ return (!this->getFaceIndex(idx_he).isValid());
+ }
+
+  /** \brief Check if the given edge lies on the boundary (either of the two half-edges
+   * lies on the boundary). */
+ inline bool
+ isBoundary(const EdgeIndex& idx_edge) const
+ {
+ assert(this->isValid(idx_edge));
+ const HalfEdgeIndex& idx = pcl::geometry::toHalfEdgeIndex(idx_edge);
+ return (this->isBoundary(idx) ||
+ this->isBoundary(this->getOppositeHalfEdgeIndex(idx)));
+ }
+
+ /**
+ * \brief Check if the given face lies on the boundary. There are two versions of
+ * this method, selected by the template parameter.
+ * \tparam CheckVerticesT Check if any vertex lies on the boundary (true) or
+ * check if any edge lies on the boundary (false).
+ */
+ template <bool CheckVerticesT>
+ inline bool
+ isBoundary(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (this->isBoundary(idx_face, std::integral_constant<bool, CheckVerticesT>()));
+ }
+
+  /** \brief Check if the given face lies on the boundary. Equivalent to calling
+   * isBoundary<true>, i.e. it checks whether any vertex of the face lies on the boundary. */
+ inline bool
+ isBoundary(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (this->isBoundary(idx_face, std::true_type()));
+ }
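+
+  // Boundary-test sketch (illustrative; mesh and face are placeholders): the template
+  // argument selects what is checked.
+  //
+  //   mesh.isBoundary<true> (face);  // does any vertex of the face lie on the boundary?
+  //   mesh.isBoundary<false> (face); // does any edge of the face lie on the boundary?
+  //   mesh.isBoundary (face);        // shorthand for isBoundary<true> (face)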
- /** \see pcl::geometry::VertexAroundFaceCirculator */
- inline VertexAroundFaceCirculator
- getVertexAroundFaceCirculator (const HalfEdgeIndex& idx_inner_half_edge) const
- {
- assert (this->isValid (idx_inner_half_edge));
- return (VertexAroundFaceCirculator (idx_inner_half_edge, this));
- }
+ ////////////////////////////////////////////////////////////////////////
+ // isManifold
+ ////////////////////////////////////////////////////////////////////////
- /** \see pcl::geometry::InnerHalfEdgeAroundFaceCirculator */
- inline InnerHalfEdgeAroundFaceCirculator
- getInnerHalfEdgeAroundFaceCirculator (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (InnerHalfEdgeAroundFaceCirculator (idx_face, this));
- }
+ /** \brief Check if the given vertex is manifold. Isolated vertices are manifold. */
+ inline bool
+ isManifold(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ if (this->isIsolated(idx_vertex))
+ return (true);
+ return (this->isManifold(idx_vertex, IsManifold()));
+ }
+
+ /** \brief Check if the mesh is manifold. */
+ inline bool
+ isManifold() const
+ {
+ return (this->isManifold(IsManifold()));
+ }
- /** \see pcl::geometry::InnerHalfEdgeAroundFaceCirculator */
- inline InnerHalfEdgeAroundFaceCirculator
- getInnerHalfEdgeAroundFaceCirculator (const HalfEdgeIndex& idx_inner_half_edge) const
- {
- assert (this->isValid (idx_inner_half_edge));
- return (InnerHalfEdgeAroundFaceCirculator (idx_inner_half_edge, this));
- }
+ ////////////////////////////////////////////////////////////////////////
+ // size
+ ////////////////////////////////////////////////////////////////////////
- /** \see pcl::geometry::OuterHalfEdgeAroundFaceCirculator */
- inline OuterHalfEdgeAroundFaceCirculator
- getOuterHalfEdgeAroundFaceCirculator (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (OuterHalfEdgeAroundFaceCirculator (idx_face, this));
- }
+ /** \brief Get the number of the vertices. */
+ inline std::size_t
+ sizeVertices() const
+ {
+ return (vertices_.size());
+ }
- /** \see pcl::geometry::OuterHalfEdgeAroundFaceCirculator */
- inline OuterHalfEdgeAroundFaceCirculator
- getOuterHalfEdgeAroundFaceCirculator (const HalfEdgeIndex& idx_inner_half_edge) const
- {
- assert (this->isValid (idx_inner_half_edge));
- return (OuterHalfEdgeAroundFaceCirculator (idx_inner_half_edge, this));
- }
+ /** \brief Get the number of the half-edges. */
+ inline std::size_t
+ sizeHalfEdges() const
+ {
+ assert(half_edges_.size() % 2 == 0); // This would be a bug in the mesh.
+ return (half_edges_.size());
+ }
- /** \see pcl::geometry::FaceAroundFaceCirculator */
- inline FaceAroundFaceCirculator
- getFaceAroundFaceCirculator (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (FaceAroundFaceCirculator (idx_face, this));
- }
+ /** \brief Get the number of the edges. */
+ inline std::size_t
+ sizeEdges() const
+ {
+ assert(half_edges_.size() % 2 == 0); // This would be a bug in the mesh.
+ return (half_edges_.size() / 2);
+ }
- /** \see pcl::geometry::FaceAroundFaceCirculator */
- inline FaceAroundFaceCirculator
- getFaceAroundFaceCirculator (const HalfEdgeIndex& idx_inner_half_edge) const
- {
- assert (this->isValid (idx_inner_half_edge));
- return (FaceAroundFaceCirculator (idx_inner_half_edge, this));
- }
+ /** \brief Get the number of the faces. */
+ inline std::size_t
+ sizeFaces() const
+ {
+ return (faces_.size());
+ }
- //////////////////////////////////////////////////////////////////////////
- // isEqualTopology
- //////////////////////////////////////////////////////////////////////////
+ ////////////////////////////////////////////////////////////////////////
+ // empty
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Check if the other mesh has the same topology as this mesh. */
- bool
- isEqualTopology (const Self& other) const
- {
- if (this->sizeVertices () != other.sizeVertices ()) return (false);
- if (this->sizeHalfEdges () != other.sizeHalfEdges ()) return (false);
- if (this->sizeFaces () != other.sizeFaces ()) return (false);
-
- for (std::size_t i=0; i<this->sizeVertices (); ++i)
- {
- if (this->getOutgoingHalfEdgeIndex (VertexIndex (i)) !=
- other.getOutgoingHalfEdgeIndex (VertexIndex (i))) return (false);
- }
-
- for (std::size_t i=0; i<this->sizeHalfEdges (); ++i)
- {
- if (this->getTerminatingVertexIndex (HalfEdgeIndex (i)) !=
- other.getTerminatingVertexIndex (HalfEdgeIndex (i))) return (false);
-
- if (this->getNextHalfEdgeIndex (HalfEdgeIndex (i)) !=
- other.getNextHalfEdgeIndex (HalfEdgeIndex (i))) return (false);
-
- if (this->getPrevHalfEdgeIndex (HalfEdgeIndex (i)) !=
- other.getPrevHalfEdgeIndex (HalfEdgeIndex (i))) return (false);
-
- if (this->getFaceIndex (HalfEdgeIndex (i)) !=
- other.getFaceIndex (HalfEdgeIndex (i))) return (false);
- }
-
- for (std::size_t i=0; i<this->sizeFaces (); ++i)
- {
- if (this->getInnerHalfEdgeIndex (FaceIndex (i)) !=
- other.getInnerHalfEdgeIndex (FaceIndex (i))) return (false);
- }
-
- return (true);
- }
+ /** \brief Check if the mesh is empty. */
+ inline bool
+ empty() const
+ {
+ return (this->emptyVertices() && this->emptyEdges() && this->emptyFaces());
+ }
- ////////////////////////////////////////////////////////////////////////
- // isValid
- ////////////////////////////////////////////////////////////////////////
+ /** \brief Check if the vertices are empty. */
+ inline bool
+ emptyVertices() const
+ {
+ return (vertices_.empty());
+ }
- /** \brief Check if the given vertex index is a valid index into the mesh. */
- inline bool
- isValid (const VertexIndex& idx_vertex) const
- {
- return (idx_vertex >= VertexIndex (0) && idx_vertex < VertexIndex (int (vertices_.size ())));
- }
+ /** \brief Check if the edges are empty. */
+ inline bool
+ emptyEdges() const
+ {
+ return (half_edges_.empty());
+ }
- /** \brief Check if the given half-edge index is a valid index into the mesh. */
- inline bool
- isValid (const HalfEdgeIndex& idx_he) const
- {
- return (idx_he >= HalfEdgeIndex (0) && idx_he < HalfEdgeIndex (half_edges_.size ()));
- }
+ /** \brief Check if the faces are empty. */
+ inline bool
+ emptyFaces() const
+ {
+ return (faces_.empty());
+ }
- /** \brief Check if the given edge index is a valid index into the mesh. */
- inline bool
- isValid (const EdgeIndex& idx_edge) const
- {
- return (idx_edge >= EdgeIndex (0) && idx_edge < EdgeIndex (half_edges_.size () / 2));
- }
+ ////////////////////////////////////////////////////////////////////////
+ // reserve
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Check if the given face index is a valid index into the mesh. */
- inline bool
- isValid (const FaceIndex& idx_face) const
- {
- return (idx_face >= FaceIndex (0) && idx_face < FaceIndex (faces_.size ()));
- }
+ /** \brief Reserve storage space for n vertices. */
+ inline void
+ reserveVertices(const std::size_t n)
+ {
+ vertices_.reserve(n);
+ this->reserveData(vertex_data_cloud_, n, HasVertexData());
+ }
+
+ /** \brief Reserve storage space for n edges (2*n storage space is reserved for the
+ * half-edges). */
+ inline void
+ reserveEdges(const std::size_t n)
+ {
+ half_edges_.reserve(2 * n);
+ this->reserveData(half_edge_data_cloud_, 2 * n, HasHalfEdgeData());
+ this->reserveData(edge_data_cloud_, n, HasEdgeData());
+ }
+
+ /** \brief Reserve storage space for n faces. */
+ inline void
+ reserveFaces(const std::size_t n)
+ {
+ faces_.reserve(n);
+ this->reserveData(face_data_cloud_, n, HasFaceData());
+ }
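A usage sketch of the reserve calls above, assuming the DefaultMeshTraits/PolygonMesh typedefs from pcl/geometry (names chosen only for illustration); pre-allocation behaves like std::vector::reserve and also reserves the attached data clouds:

#include <pcl/geometry/mesh_traits.h>
#include <pcl/geometry/polygon_mesh.h>
#include <pcl/point_types.h>

using MeshTraits = pcl::geometry::DefaultMeshTraits<pcl::PointXYZ>; // vertex data only
using Mesh = pcl::geometry::PolygonMesh<MeshTraits>;

int main()
{
  Mesh mesh;
  mesh.reserveVertices(100); // 100 vertices plus 100 entries in the vertex data cloud
  mesh.reserveEdges(300);    // 300 edges -> 600 half-edges are reserved internally
  mesh.reserveFaces(200);    // 200 faces
}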
- ////////////////////////////////////////////////////////////////////////
- // isDeleted
- ////////////////////////////////////////////////////////////////////////
+ ////////////////////////////////////////////////////////////////////////
+ // resize
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Check if the given vertex is marked as deleted. */
- inline bool
- isDeleted (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (!this->getOutgoingHalfEdgeIndex (idx_vertex).isValid ());
- }
+ /** \brief Resize the vertices to n elements. */
+ inline void
+ resizeVertices(const std::size_t n, const VertexData& data = VertexData())
+ {
+ vertices_.resize(n);
+ this->resizeData(vertex_data_cloud_, n, data, HasVertexData());
+ }
+
+ /** \brief Resize the edges to n elements (half-edges will hold 2*n elements). */
+ inline void
+ resizeEdges(const std::size_t n,
+ const EdgeData& edge_data = EdgeData(),
+ const HalfEdgeData he_data = HalfEdgeData())
+ {
+ half_edges_.resize(2 * n);
+ this->resizeData(half_edge_data_cloud_, 2 * n, he_data, HasHalfEdgeData());
+ this->resizeData(edge_data_cloud_, n, edge_data, HasEdgeData());
+ }
+
+ /** \brief Resize the faces to n elements. */
+ inline void
+ resizeFaces(const std::size_t n, const FaceData& data = FaceData())
+ {
+ faces_.resize(n);
+ this->resizeData(face_data_cloud_, n, data, HasFaceData());
+ }
- /** \brief Check if the given half-edge is marked as deleted. */
- inline bool
- isDeleted (const HalfEdgeIndex& idx_he) const
- {
- assert (this->isValid (idx_he));
- return (!this->getTerminatingVertexIndex (idx_he).isValid ());
- }
+ ////////////////////////////////////////////////////////////////////////
+ // clear
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Check if the given edge (any of the two half-edges) is marked as deleted. */
- inline bool
- isDeleted (const EdgeIndex& idx_edge) const
- {
- assert (this->isValid (idx_edge));
- return (this->isDeleted (pcl::geometry::toHalfEdgeIndex (idx_edge, true)) ||
- this->isDeleted (pcl::geometry::toHalfEdgeIndex (idx_edge, false)));
- }
+ /** \brief Clear all mesh elements and data. */
+ void
+ clear()
+ {
+ vertices_.clear();
+ half_edges_.clear();
+ faces_.clear();
+
+ this->clearData(vertex_data_cloud_, HasVertexData());
+ this->clearData(half_edge_data_cloud_, HasHalfEdgeData());
+ this->clearData(edge_data_cloud_, HasEdgeData());
+ this->clearData(face_data_cloud_, HasFaceData());
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // get / set the vertex data cloud
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Get access to the stored vertex data.
+ * \warning Please make sure to NOT add or remove elements from the cloud.
+ */
+ inline VertexDataCloud&
+ getVertexDataCloud()
+ {
+ return (vertex_data_cloud_);
+ }
- /** \brief Check if the given face is marked as deleted. */
- inline bool
- isDeleted (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (!this->getInnerHalfEdgeIndex (idx_face).isValid ());
- }
+ /** \brief Get the stored vertex data. */
+ inline VertexDataCloud
+ getVertexDataCloud() const
+ {
+ return (vertex_data_cloud_);
+ }
+
+ /** \brief Change the stored vertex data.
+ * \param[in] vertex_data_cloud The new vertex data. Must have the same size as the
+ * current data.
+ * \return true if the cloud could be set.
+ */
+ inline bool
+ setVertexDataCloud(const VertexDataCloud& vertex_data_cloud)
+ {
+ if (vertex_data_cloud.size() == vertex_data_cloud_.size()) {
+ vertex_data_cloud_ = vertex_data_cloud;
+ return (true);
+ }
+ return (false);
+ }
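A sketch of the resulting contract, reusing the illustrative Mesh typedef from the sketch above: in-place edits through the non-const accessor are fine, while a wholesale replacement is accepted only when the sizes match:

void offsetVertices(Mesh& mesh)
{
  // Allowed: modify points in place, but never add or remove any.
  Mesh::VertexDataCloud& cloud = mesh.getVertexDataCloud();
  for (auto& point : cloud)
    point.z += 1.0f;

  // Allowed: replace the whole cloud with one of identical size.
  Mesh::VertexDataCloud replacement = cloud;
  const bool accepted = mesh.setVertexDataCloud(replacement); // true: sizes match
  (void)accepted;
}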
+
+ ////////////////////////////////////////////////////////////////////////
+ // get / set the half-edge data cloud
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Get access to the stored half-edge data.
+ * \warning Please make sure to NOT add or remove elements from the cloud.
+ */
+ inline HalfEdgeDataCloud&
+ getHalfEdgeDataCloud()
+ {
+ return (half_edge_data_cloud_);
+ }
- ////////////////////////////////////////////////////////////////////////
- // isIsolated
- ////////////////////////////////////////////////////////////////////////
+ /** \brief Get the stored half-edge data. */
+ inline HalfEdgeDataCloud
+ getHalfEdgeDataCloud() const
+ {
+ return (half_edge_data_cloud_);
+ }
+
+ /** \brief Change the stored half-edge data.
+ * \param[in] half_edge_data_cloud The new half-edge data. Must have the same size as
+ * the current data.
+ * \return true if the cloud could be set.
+ */
+ inline bool
+ setHalfEdgeDataCloud(const HalfEdgeDataCloud& half_edge_data_cloud)
+ {
+ if (half_edge_data_cloud.size() == half_edge_data_cloud_.size()) {
+ half_edge_data_cloud_ = half_edge_data_cloud;
+ return (true);
+ }
+ return (false);
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // get / set the edge data cloud
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Get access to the stored edge data.
+ * \warning Please make sure to NOT add or remove elements from the cloud.
+ */
+ inline EdgeDataCloud&
+ getEdgeDataCloud()
+ {
+ return (edge_data_cloud_);
+ }
- /** \brief Check if the given vertex is isolated (not connected to other elements). */
- inline bool
- isIsolated (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (!this->getOutgoingHalfEdgeIndex (idx_vertex).isValid ());
- }
+ /** \brief Get the stored edge data. */
+ inline EdgeDataCloud
+ getEdgeDataCloud() const
+ {
+ return (edge_data_cloud_);
+ }
+
+ /** \brief Change the stored edge data.
+ * \param[in] edge_data_cloud The new edge data. Must have the same size as the
+ * current data.
+ * \return true if the cloud could be set.
+ */
+ inline bool
+ setEdgeDataCloud(const EdgeDataCloud& edge_data_cloud)
+ {
+ if (edge_data_cloud.size() == edge_data_cloud_.size()) {
+ edge_data_cloud_ = edge_data_cloud;
+ return (true);
+ }
+ return (false);
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // get / set the face data cloud
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Get access to the stored face data.
+ * \warning Please make sure to NOT add or remove elements from the cloud.
+ */
+ inline FaceDataCloud&
+ getFaceDataCloud()
+ {
+ return (face_data_cloud_);
+ }
- ////////////////////////////////////////////////////////////////////////
- // isBoundary
- ////////////////////////////////////////////////////////////////////////
+ /** \brief Get the stored face data. */
+ inline FaceDataCloud
+ getFaceDataCloud() const
+ {
+ return (face_data_cloud_);
+ }
+
+ /** \brief Change the stored face data.
+ * \param[in] face_data_cloud The new face data. Must have the same size as the
+ * current data.
+ * \return true if the cloud could be set.
+ */
+ inline bool
+ setFaceDataCloud(const FaceDataCloud& face_data_cloud)
+ {
+ if (face_data_cloud.size() == face_data_cloud_.size()) {
+ face_data_cloud_ = face_data_cloud;
+ return (true);
+ }
+ return (false);
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // getVertexIndex / getHalfEdgeIndex / getEdgeIndex / getFaceIndex
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Get the index associated to the given vertex data.
+ * \return Invalid index if the mesh does not have associated vertex data.
+ */
+ inline VertexIndex
+ getVertexIndex(const VertexData& vertex_data) const
+ {
+ if (HasVertexData::value) {
+ assert(&vertex_data >= &vertex_data_cloud_.front() &&
+ &vertex_data <= &vertex_data_cloud_.back());
+ return (VertexIndex(std::distance(&vertex_data_cloud_.front(), &vertex_data)));
+ }
+ return (VertexIndex());
+ }
+
+ /** \brief Get the index associated to the given half-edge data. */
+ inline HalfEdgeIndex
+ getHalfEdgeIndex(const HalfEdgeData& half_edge_data) const
+ {
+ if (HasHalfEdgeData::value) {
+ assert(&half_edge_data >= &half_edge_data_cloud_.front() &&
+ &half_edge_data <= &half_edge_data_cloud_.back());
+ return (HalfEdgeIndex(
+ std::distance(&half_edge_data_cloud_.front(), &half_edge_data)));
+ }
+ return (HalfEdgeIndex());
+ }
+
+ /** \brief Get the index associated to the given edge data. */
+ inline EdgeIndex
+ getEdgeIndex(const EdgeData& edge_data) const
+ {
+ if (HasEdgeData::value) {
+ assert(&edge_data >= &edge_data_cloud_.front() &&
+ &edge_data <= &edge_data_cloud_.back());
+ return (EdgeIndex(std::distance(&edge_data_cloud_.front(), &edge_data)));
+ }
+ return (EdgeIndex());
+ }
+
+ /** \brief Get the index associated to the given face data. */
+ inline FaceIndex
+ getFaceIndex(const FaceData& face_data) const
+ {
+ if (HasFaceData::value) {
+ assert(&face_data >= &face_data_cloud_.front() &&
+ &face_data <= &face_data_cloud_.back());
+ return (FaceIndex(std::distance(&face_data_cloud_.front(), &face_data)));
+ }
+ return (FaceIndex());
+ }
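These lookups translate a reference into the stored data cloud back to its index via pointer distance, so they only make sense for references obtained from the mesh itself; a sketch, using the same illustrative Mesh typedef as above:

#include <iostream>

void printFirstAndLastVertexIndex(Mesh& mesh)
{
  Mesh::VertexDataCloud& cloud = mesh.getVertexDataCloud(); // reference, not a copy
  if (cloud.empty())
    return;

  const Mesh::VertexIndex first = mesh.getVertexIndex(cloud.front()); // VertexIndex(0)
  const Mesh::VertexIndex last  = mesh.getVertexIndex(cloud.back());  // VertexIndex(size - 1)
  std::cout << first.get() << ' ' << last.get() << '\n';
}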
+
+protected:
+ ////////////////////////////////////////////////////////////////////////
+ // Types
+ ////////////////////////////////////////////////////////////////////////
+
+ // Elements
+ using Vertex = pcl::geometry::Vertex;
+ using HalfEdge = pcl::geometry::HalfEdge;
+ using Face = pcl::geometry::Face;
+
+ using Vertices = std::vector<Vertex>;
+ using HalfEdges = std::vector<HalfEdge>;
+ using Faces = std::vector<Face>;
+
+ using VertexIterator = typename Vertices::iterator;
+ using HalfEdgeIterator = typename HalfEdges::iterator;
+ using FaceIterator = typename Faces::iterator;
+
+ using VertexConstIterator = typename Vertices::const_iterator;
+ using HalfEdgeConstIterator = typename HalfEdges::const_iterator;
+ using FaceConstIterator = typename Faces::const_iterator;
+
+ /** \brief General implementation of addFace. */
+ FaceIndex
+ addFaceImplBase(const VertexIndices& vertices,
+ const FaceData& face_data,
+ const EdgeData& edge_data,
+ const HalfEdgeData& half_edge_data)
+ {
+ const int n = static_cast<int>(vertices.size());
+ if (n < 3)
+ return (FaceIndex());
+
+ // Check for topological errors
+ inner_he_.resize(n);
+ free_he_.resize(n);
+ is_new_.resize(n);
+ make_adjacent_.resize(n);
+ for (int i = 0; i < n; ++i) {
+ if (!this->checkTopology1(vertices[i],
+ vertices[(i + 1) % n],
+ inner_he_[i],
+ is_new_[i],
+ IsManifold())) {
+ return (FaceIndex());
+ }
+ }
+ for (int i = 0; i < n; ++i) {
+ int j = (i + 1) % n;
+ if (!this->checkTopology2(inner_he_[i],
+ inner_he_[j],
+ is_new_[i],
+ is_new_[j],
+ this->isIsolated(vertices[j]),
+ make_adjacent_[i],
+ free_he_[i],
+ IsManifold())) {
+ return (FaceIndex());
+ }
+ }
+
+ // Reconnect the existing half-edges if needed
+ if (!IsManifold::value) {
+ for (int i = 0; i < n; ++i) {
+ if (make_adjacent_[i]) {
+ this->makeAdjacent(inner_he_[i], inner_he_[(i + 1) % n], free_he_[i]);
+ }
+ }
+ }
+
+ // Add new half-edges if needed
+ for (int i = 0; i < n; ++i) {
+ if (is_new_[i]) {
+ inner_he_[i] = this->addEdge(
+ vertices[i], vertices[(i + 1) % n], half_edge_data, edge_data);
+ }
+ }
+
+ // Connect
+ for (int i = 0; i < n; ++i) {
+ int j = (i + 1) % n;
+ if (is_new_[i] && is_new_[j])
+ this->connectNewNew(inner_he_[i], inner_he_[j], vertices[j], IsManifold());
+ else if (is_new_[i] && !is_new_[j])
+ this->connectNewOld(inner_he_[i], inner_he_[j], vertices[j]);
+ else if (!is_new_[i] && is_new_[j])
+ this->connectOldNew(inner_he_[i], inner_he_[j], vertices[j]);
+ else
+ this->connectOldOld(inner_he_[i], inner_he_[j], vertices[j], IsManifold());
+ }
+ return (this->connectFace(inner_he_, face_data));
+ }
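Seen from the public API, these topology checks are why addFace returns an invalid index instead of corrupting the mesh when a face cannot be inserted; a sketch using an assumed manifold TriangleMesh typedef with illustrative coordinates:

#include <pcl/geometry/mesh_traits.h>
#include <pcl/geometry/triangle_mesh.h>
#include <pcl/point_types.h>

using TriMesh = pcl::geometry::TriangleMesh<pcl::geometry::DefaultMeshTraits<pcl::PointXYZ>>;

void addOneTriangle(TriMesh& mesh)
{
  const TriMesh::VertexIndex a = mesh.addVertex(pcl::PointXYZ(0.f, 0.f, 0.f));
  const TriMesh::VertexIndex b = mesh.addVertex(pcl::PointXYZ(1.f, 0.f, 0.f));
  const TriMesh::VertexIndex c = mesh.addVertex(pcl::PointXYZ(0.f, 1.f, 0.f));

  // addFace runs checkTopology1/checkTopology2 internally and returns an
  // invalid index if the face would violate the manifold property.
  const TriMesh::FaceIndex face = mesh.addFace(a, b, c);
  if (!face.isValid())
  {
    // The face was rejected; the mesh is unchanged.
  }
}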
+
+ ////////////////////////////////////////////////////////////////////////
+ // addEdge
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Add an edge between the two given vertices and connect them with the
+ * vertices.
+ * \param[in] idx_v_a The first vertex index
+ * \param[in] idx_v_b The second vertex index
+ * \param[in] he_data Data associated with the half-edges. This is only added if
+ * the mesh has data associated with the half-edges.
+ * \param[in] edge_data Data associated with the edge. This is only added if the mesh
+ * has data associated with the edges.
+ * \return Index to the half-edge from vertex a to vertex b.
+ */
+ HalfEdgeIndex
+ addEdge(const VertexIndex& idx_v_a,
+ const VertexIndex& idx_v_b,
+ const HalfEdgeData& he_data,
+ const EdgeData& edge_data)
+ {
+ half_edges_.push_back(HalfEdge(idx_v_b));
+ half_edges_.push_back(HalfEdge(idx_v_a));
+
+ this->addData(half_edge_data_cloud_, he_data, HasHalfEdgeData());
+ this->addData(half_edge_data_cloud_, he_data, HasHalfEdgeData());
+ this->addData(edge_data_cloud_, edge_data, HasEdgeData());
+
+ return (HalfEdgeIndex(static_cast<int>(half_edges_.size() - 2)));
+ }
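The two consecutive push_back calls establish the even/odd pairing between half-edges and edges that the index conversion helpers in pcl/geometry/mesh_indices.h rely on; a small sketch of that pairing:

#include <pcl/geometry/mesh_indices.h>

// Edge e owns half-edge 2*e (a -> b, the index addEdge returns) and 2*e+1 (b -> a).
const pcl::geometry::EdgeIndex edge(3);
const pcl::geometry::HalfEdgeIndex he_ab = pcl::geometry::toHalfEdgeIndex(edge, true);  // half-edge 6
const pcl::geometry::HalfEdgeIndex he_ba = pcl::geometry::toHalfEdgeIndex(edge, false); // half-edge 7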
+
+ ////////////////////////////////////////////////////////////////////////
+ // topology checks
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Check if the edge between the two vertices can be added.
+ * \param[in] idx_v_a Index to the first vertex.
+ * \param[in] idx_v_b Index to the second vertex.
+ * \param[out] idx_he_ab Index to the half-edge ab if is_new_ab=false.
+ * \param[out] is_new_ab true if the half-edge between the vertices does not exist yet
+ * and has to be added. Must be initialized with true!
+ * \return true if the half-edge may be added.
+ */
+ bool
+ checkTopology1(const VertexIndex& idx_v_a,
+ const VertexIndex& idx_v_b,
+ HalfEdgeIndex& idx_he_ab,
+ std::vector<bool>::reference is_new_ab,
+ std::true_type /*is_manifold*/) const
+ {
+ is_new_ab = true;
+ if (this->isIsolated(idx_v_a))
+ return (true);
+
+ idx_he_ab = this->getOutgoingHalfEdgeIndex(idx_v_a);
+
+ if (!this->isBoundary(idx_he_ab))
+ return (false);
+ if (this->getTerminatingVertexIndex(idx_he_ab) == idx_v_b)
+ is_new_ab = false;
+ return (true);
+ }
+
+ /** \brief Non-manifold version of checkTopology1. */
+ bool
+ checkTopology1(const VertexIndex& idx_v_a,
+ const VertexIndex& idx_v_b,
+ HalfEdgeIndex& idx_he_ab,
+ std::vector<bool>::reference is_new_ab,
+ std::false_type /*is_manifold*/) const
+ {
+ is_new_ab = true;
+ if (this->isIsolated(idx_v_a))
+ return (true);
+ if (!this->isBoundary(this->getOutgoingHalfEdgeIndex(idx_v_a)))
+ return (false);
+
+ VertexAroundVertexCirculator circ =
+ this->getVertexAroundVertexCirculator(this->getOutgoingHalfEdgeIndex(idx_v_a));
+ const VertexAroundVertexCirculator circ_end = circ;
+
+ do {
+ if (circ.getTargetIndex() == idx_v_b) {
+ idx_he_ab = circ.getCurrentHalfEdgeIndex();
+ if (!this->isBoundary(idx_he_ab))
+ return (false);
- /** \brief Check if the given vertex lies on the boundary. Isolated vertices are considered to be on the boundary. */
- inline bool
- isBoundary (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- if (this->isIsolated (idx_vertex)) return (true);
- return (this->isBoundary (this->getOutgoingHalfEdgeIndex (idx_vertex)));
- }
+ is_new_ab = false;
+ return (true);
+ }
+ } while (++circ != circ_end);
+
+ return (true);
+ }
+
+ /** \brief Check if the face may be added (mesh does not become non-manifold). */
+ inline bool
+ checkTopology2(const HalfEdgeIndex& /*idx_he_ab*/,
+ const HalfEdgeIndex& /*idx_he_bc*/,
+ const bool is_new_ab,
+ const bool is_new_bc,
+ const bool is_isolated_b,
+ std::vector<bool>::reference /*make_adjacent_ab_bc*/,
+ HalfEdgeIndex& /*idx_free_half_edge*/,
+ std::true_type /*is_manifold*/) const
+ {
+ return !(is_new_ab && is_new_bc && !is_isolated_b);
+ }
+
+ /** \brief Check if the half-edge bc is the next half-edge of ab.
+ * \param[in] idx_he_ab Index to the half-edge between the vertices a and b.
+ * \param[in] idx_he_bc Index to the half-edge between the vertices b and c.
+ * \param[in] is_new_ab Half-edge ab is new.
+ * \param[in] is_new_bc Half-edge bc is new.
+ * \param[out] make_adjacent_ab_bc Half-edges ab and bc need to be made adjacent.
+ * \param[out] idx_free_half_edge Free half-edge (needed for makeAdjacent)
+ * \return true if addFace may be continued.
+ */
+ inline bool
+ checkTopology2(const HalfEdgeIndex& idx_he_ab,
+ const HalfEdgeIndex& idx_he_bc,
+ const bool is_new_ab,
+ const bool is_new_bc,
+ const bool /*is_isolated_b*/,
+ std::vector<bool>::reference make_adjacent_ab_bc,
+ HalfEdgeIndex& idx_free_half_edge,
+ std::false_type /*is_manifold*/) const
+ {
+ if (is_new_ab || is_new_bc) {
+ make_adjacent_ab_bc = false;
+ return (true); // Make adjacent is only needed for two old half-edges
+ }
+
+ if (this->getNextHalfEdgeIndex(idx_he_ab) == idx_he_bc) {
+ make_adjacent_ab_bc = false;
+ return (true); // already adjacent
+ }
+
+ make_adjacent_ab_bc = true;
+
+ // Find the next boundary half edge
+ IncomingHalfEdgeAroundVertexCirculator circ =
+ this->getIncomingHalfEdgeAroundVertexCirculator(
+ this->getOppositeHalfEdgeIndex(idx_he_bc));
+
+ do
+ ++circ;
+ while (!this->isBoundary(circ.getTargetIndex()));
+ idx_free_half_edge = circ.getTargetIndex();
+
+ // This would detach the faces around the vertex from each other.
+ return (circ.getTargetIndex() != idx_he_ab);
+ }
+
+ /** \brief Make the half-edge bc the next half-edge of ab.
+ * \param[in] idx_he_ab Index to the half-edge between the vertices a
+ * and b.
+ * \param[in] idx_he_bc Index to the half-edge between the
+ * vertices b and c.
+ * \param[in, out] idx_free_half_edge Free half-edge needed to re-connect the
+ * half-edges around vertex b.
+ */
+ void
+ makeAdjacent(const HalfEdgeIndex& idx_he_ab,
+ const HalfEdgeIndex& idx_he_bc,
+ HalfEdgeIndex& idx_free_half_edge)
+ {
+ // Re-link. No references!
+ const HalfEdgeIndex idx_he_ab_next = this->getNextHalfEdgeIndex(idx_he_ab);
+ const HalfEdgeIndex idx_he_bc_prev = this->getPrevHalfEdgeIndex(idx_he_bc);
+ const HalfEdgeIndex idx_he_free_next =
+ this->getNextHalfEdgeIndex(idx_free_half_edge);
+
+ this->connectPrevNext(idx_he_ab, idx_he_bc);
+ this->connectPrevNext(idx_free_half_edge, idx_he_ab_next);
+ this->connectPrevNext(idx_he_bc_prev, idx_he_free_next);
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // connect
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Add a face to the mesh and connect it to the half-edges.
+ * \param[in] inner_he Inner half-edges of the face.
+ * \param[in] face_data Data that is stored in the face. This is only added if the
+ * mesh has data associated with the faces.
+ * \return Index to the new face.
+ */
+ FaceIndex
+ connectFace(const HalfEdgeIndices& inner_he, const FaceData& face_data)
+ {
+ faces_.push_back(Face(inner_he.back()));
+ this->addData(face_data_cloud_, face_data, HasFaceData());
- /** \brief Check if the given half-edge lies on the bounddary. */
- inline bool
- isBoundary (const HalfEdgeIndex& idx_he) const
- {
- assert (this->isValid (idx_he));
- return (!this->getFaceIndex (idx_he).isValid ());
- }
+ const FaceIndex idx_face(static_cast<int>(this->sizeFaces() - 1));
- /** \brief Check if the given edge lies on the boundary (any of the two half-edges lies on the boundary. */
- inline bool
- isBoundary (const EdgeIndex& idx_edge) const
- {
- assert (this->isValid (idx_edge));
- const HalfEdgeIndex& idx = pcl::geometry::toHalfEdgeIndex (idx_edge);
- return (this->isBoundary (idx) || this->isBoundary (this->getOppositeHalfEdgeIndex (idx)));
- }
+ for (const auto& idx_half_edge : inner_he) {
+ this->setFaceIndex(idx_half_edge, idx_face);
+ }
- /** \brief Check if the given face lies on the boundary. There are two versions of this method, selected by the template parameter.
- * \tparam CheckVerticesT Check if any vertex lies on the boundary (true) or check if any edge lies on the boundary (false).
- */
- template <bool CheckVerticesT> inline bool
- isBoundary (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (this->isBoundary (idx_face, std::integral_constant <bool, CheckVerticesT> ()));
- }
+ return (idx_face);
+ }
- /** \brief Check if the given face lies on the boundary. This method uses isBoundary \c true which checks if any vertex lies on the boundary. */
- inline bool
- isBoundary (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (this->isBoundary (idx_face, std::true_type ()));
- }
+ /** \brief Connect the next and prev indices of the two half-edges with each other. */
+ inline void
+ connectPrevNext(const HalfEdgeIndex& idx_he_ab, const HalfEdgeIndex& idx_he_bc)
+ {
+ this->setNextHalfEdgeIndex(idx_he_ab, idx_he_bc);
+ this->setPrevHalfEdgeIndex(idx_he_bc, idx_he_ab);
+ }
+
+ /** \brief Both half-edges are new (manifold version). */
+ void
+ connectNewNew(const HalfEdgeIndex& idx_he_ab,
+ const HalfEdgeIndex& idx_he_bc,
+ const VertexIndex& idx_v_b,
+ std::true_type /*is_manifold*/)
+ {
+ const HalfEdgeIndex idx_he_ba = this->getOppositeHalfEdgeIndex(idx_he_ab);
+ const HalfEdgeIndex idx_he_cb = this->getOppositeHalfEdgeIndex(idx_he_bc);
- ////////////////////////////////////////////////////////////////////////
- // isManifold
- ////////////////////////////////////////////////////////////////////////
+ this->connectPrevNext(idx_he_ab, idx_he_bc);
+ this->connectPrevNext(idx_he_cb, idx_he_ba);
- /** \brief Check if the given vertex is manifold. Isolated vertices are manifold. */
- inline bool
- isManifold (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- if (this->isIsolated (idx_vertex)) return (true);
- return (this->isManifold (idx_vertex, IsManifold ()));
- }
+ this->setOutgoingHalfEdgeIndex(idx_v_b, idx_he_ba);
+ }
- /** \brief Check if the mesh is manifold. */
- inline bool
- isManifold () const
- {
- return (this->isManifold (IsManifold ()));
- }
+ /** \brief Both half-edges are new (non-manifold version). */
+ void
+ connectNewNew(const HalfEdgeIndex& idx_he_ab,
+ const HalfEdgeIndex& idx_he_bc,
+ const VertexIndex& idx_v_b,
+ std::false_type /*is_manifold*/)
+ {
+ if (this->isIsolated(idx_v_b)) {
+ this->connectNewNew(idx_he_ab, idx_he_bc, idx_v_b, std::true_type());
+ }
+ else {
+ const HalfEdgeIndex idx_he_ba = this->getOppositeHalfEdgeIndex(idx_he_ab);
+ const HalfEdgeIndex idx_he_cb = this->getOppositeHalfEdgeIndex(idx_he_bc);
+
+ // No references!
+ const HalfEdgeIndex idx_he_b_out = this->getOutgoingHalfEdgeIndex(idx_v_b);
+ const HalfEdgeIndex idx_he_b_out_prev = this->getPrevHalfEdgeIndex(idx_he_b_out);
+
+ this->connectPrevNext(idx_he_ab, idx_he_bc);
+ this->connectPrevNext(idx_he_cb, idx_he_b_out);
+ this->connectPrevNext(idx_he_b_out_prev, idx_he_ba);
+ }
+ }
+
+ /** \brief The first half-edge is new. */
+ void
+ connectNewOld(const HalfEdgeIndex& idx_he_ab,
+ const HalfEdgeIndex& idx_he_bc,
+ const VertexIndex& idx_v_b)
+ {
+ const HalfEdgeIndex idx_he_ba = this->getOppositeHalfEdgeIndex(idx_he_ab);
+ const HalfEdgeIndex idx_he_bc_prev =
+ this->getPrevHalfEdgeIndex(idx_he_bc); // No reference!
- ////////////////////////////////////////////////////////////////////////
- // size
- ////////////////////////////////////////////////////////////////////////
+ this->connectPrevNext(idx_he_ab, idx_he_bc);
+ this->connectPrevNext(idx_he_bc_prev, idx_he_ba);
- /** \brief Get the number of the vertices. */
- inline std::size_t
- sizeVertices () const
- {
- return (vertices_.size ());
- }
+ this->setOutgoingHalfEdgeIndex(idx_v_b, idx_he_ba);
+ }
- /** \brief Get the number of the half-edges. */
- inline std::size_t
- sizeHalfEdges () const
- {
- assert (half_edges_.size () % 2 == 0); // This would be a bug in the mesh.
- return (half_edges_.size ());
- }
+ /** \brief The second half-edge is new. */
+ void
+ connectOldNew(const HalfEdgeIndex& idx_he_ab,
+ const HalfEdgeIndex& idx_he_bc,
+ const VertexIndex& idx_v_b)
+ {
+ const HalfEdgeIndex idx_he_cb = this->getOppositeHalfEdgeIndex(idx_he_bc);
+ const HalfEdgeIndex idx_he_ab_next =
+ this->getNextHalfEdgeIndex(idx_he_ab); // No reference!
+
+ this->connectPrevNext(idx_he_ab, idx_he_bc);
+ this->connectPrevNext(idx_he_cb, idx_he_ab_next);
+
+ this->setOutgoingHalfEdgeIndex(idx_v_b, idx_he_ab_next);
+ }
+
+ /** \brief Both half-edges are old (manifold version). */
+ void
+ connectOldOld(const HalfEdgeIndex& /*idx_he_ab*/,
+ const HalfEdgeIndex& /*idx_he_bc*/,
+ const VertexIndex& /*idx_v_b*/,
+ std::true_type /*is_manifold*/)
+ {}
+
+ /** \brief Both half-edges are old (non-manifold version). */
+ void
+ connectOldOld(const HalfEdgeIndex& /*idx_he_ab*/,
+ const HalfEdgeIndex& idx_he_bc,
+ const VertexIndex& idx_v_b,
+ std::false_type /*is_manifold*/)
+ {
+ const HalfEdgeIndex& idx_he_b_out = this->getOutgoingHalfEdgeIndex(idx_v_b);
- /** \brief Get the number of the edges. */
- inline std::size_t
- sizeEdges () const
- {
- assert (half_edges_.size () % 2 == 0); // This would be a bug in the mesh.
- return (half_edges_.size () / 2);
- }
+ // The outgoing half edge MUST be a boundary half-edge (if there is one)
+ if (idx_he_b_out == idx_he_bc) // he_bc is no longer on the boundary
+ {
+ OutgoingHalfEdgeAroundVertexCirculator circ =
+ this->getOutgoingHalfEdgeAroundVertexCirculator(idx_he_b_out);
+ const OutgoingHalfEdgeAroundVertexCirculator circ_end = circ;
+
+ while (++circ != circ_end) {
+ if (this->isBoundary(circ.getTargetIndex())) {
+ this->setOutgoingHalfEdgeIndex(idx_v_b, circ.getTargetIndex());
+ return;
+ }
+ }
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // addData
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Add mesh data. */
+ template <class DataT>
+ inline void
+ addData(pcl::PointCloud<DataT>& cloud, const DataT& data, std::true_type /*has_data*/)
+ {
+ cloud.push_back(data);
+ }
+
+ /** \brief Does nothing. */
+ template <class DataT>
+ inline void
+ addData(pcl::PointCloud<DataT>& /*cloud*/,
+ const DataT& /*data*/,
+ std::false_type /*has_data*/)
+ {}
+
+ ////////////////////////////////////////////////////////////////////////
+ // deleteFace
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Manifold version of deleteFace. If the mesh becomes non-manifold due to the
+ * delete operation, the faces around the non-manifold vertex are deleted until the
+ * mesh becomes manifold again. */
+ void
+ deleteFace(const FaceIndex& idx_face, std::true_type /*is_manifold*/)
+ {
+ assert(this->isValid(idx_face));
+ delete_faces_face_.clear();
+ delete_faces_face_.push_back(idx_face);
+
+ while (!delete_faces_face_.empty()) {
+ const FaceIndex idx_face_cur = delete_faces_face_.back();
+ delete_faces_face_.pop_back();
+
+ // This calls the non-manifold version of deleteFace, which will call the manifold
+ // version of reconnect.
+ this->deleteFace(idx_face_cur, std::false_type());
+ }
+ }
+
+ /** \brief Non-manifold version of deleteFace. */
+ void
+ deleteFace(const FaceIndex& idx_face, std::false_type /*is_manifold*/)
+ {
+ assert(this->isValid(idx_face));
+ if (this->isDeleted(idx_face))
+ return;
+
+ // Store all half-edges in the face
+ inner_he_.clear();
+ is_boundary_.clear();
+ InnerHalfEdgeAroundFaceCirculator circ =
+ this->getInnerHalfEdgeAroundFaceCirculator(idx_face);
+ const InnerHalfEdgeAroundFaceCirculator circ_end = circ;
+ do {
+ inner_he_.push_back(circ.getTargetIndex());
+ is_boundary_.push_back(
+ this->isBoundary(this->getOppositeHalfEdgeIndex(circ.getTargetIndex())));
+ } while (++circ != circ_end);
+ assert(inner_he_.size() >= 3); // Minimum should be a triangle.
+
+ const int n = static_cast<int>(inner_he_.size());
+ int j;
+
+ if (IsManifold::value) {
+ for (int i = 0; i < n; ++i) {
+ j = (i + 1) % n;
+ this->reconnect(inner_he_[i], inner_he_[j], is_boundary_[i], is_boundary_[j]);
+ }
+ for (int i = 0; i < n; ++i) {
+ this->getHalfEdge(inner_he_[i]).idx_face_.invalidate();
+ }
+ }
+ else {
+ for (int i = 0; i < n; ++i) {
+ j = (i + 1) % n;
+ this->reconnect(inner_he_[i], inner_he_[j], is_boundary_[i], is_boundary_[j]);
+ this->getHalfEdge(inner_he_[i]).idx_face_.invalidate();
+ }
+ }
+
+ this->markDeleted(idx_face);
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // reconnect
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Disconnect the input half-edges from the mesh and adjust the indices of the
+ * connected half-edges. */
+ void
+ reconnect(const HalfEdgeIndex& idx_he_ab,
+ const HalfEdgeIndex& idx_he_bc,
+ const bool is_boundary_ba,
+ const bool is_boundary_cb)
+ {
+ const HalfEdgeIndex idx_he_ba = this->getOppositeHalfEdgeIndex(idx_he_ab);
+ const HalfEdgeIndex idx_he_cb = this->getOppositeHalfEdgeIndex(idx_he_bc);
+ const VertexIndex idx_v_b = this->getTerminatingVertexIndex(idx_he_ab);
- /** \brief Get the number of the faces. */
- inline std::size_t
- sizeFaces () const
- {
- return (faces_.size ());
- }
+ if (is_boundary_ba && is_boundary_cb) // boundary - boundary
+ {
+ const HalfEdgeIndex& idx_he_cb_next = this->getNextHalfEdgeIndex(idx_he_cb);
+
+ if (idx_he_cb_next == idx_he_ba) // Vertex b is isolated
+ {
+ this->markDeleted(idx_v_b);
+ }
+ else {
+ this->connectPrevNext(this->getPrevHalfEdgeIndex(idx_he_ba), idx_he_cb_next);
+ this->setOutgoingHalfEdgeIndex(idx_v_b, idx_he_cb_next);
+ }
+
+ this->markDeleted(idx_he_ab);
+ this->markDeleted(idx_he_ba);
+ }
+ else if (is_boundary_ba && !is_boundary_cb) // boundary - no boundary
+ {
+ this->connectPrevNext(this->getPrevHalfEdgeIndex(idx_he_ba), idx_he_bc);
+ this->setOutgoingHalfEdgeIndex(idx_v_b, idx_he_bc);
- ////////////////////////////////////////////////////////////////////////
- // empty
- ////////////////////////////////////////////////////////////////////////
+ this->markDeleted(idx_he_ab);
+ this->markDeleted(idx_he_ba);
+ }
+ else if (!is_boundary_ba && is_boundary_cb) // no boundary - boundary
+ {
+ const HalfEdgeIndex& idx_he_cb_next = this->getNextHalfEdgeIndex(idx_he_cb);
+ this->connectPrevNext(idx_he_ab, idx_he_cb_next);
+ this->setOutgoingHalfEdgeIndex(idx_v_b, idx_he_cb_next);
+ }
+ else // no boundary - no boundary
+ {
+ this->reconnectNBNB(idx_he_bc, idx_he_cb, idx_v_b, IsManifold());
+ }
+ }
+
+ /** \brief Both edges are not on the boundary. Manifold version. */
+ void
+ reconnectNBNB(const HalfEdgeIndex& idx_he_bc,
+ const HalfEdgeIndex& idx_he_cb,
+ const VertexIndex& idx_v_b,
+ std::true_type /*is_manifold*/)
+ {
+ if (this->isBoundary(idx_v_b)) {
+ // Deletion of this face makes the mesh non-manifold
+ // -> delete the neighboring faces until it is manifold again
+ IncomingHalfEdgeAroundVertexCirculator circ =
+ this->getIncomingHalfEdgeAroundVertexCirculator(idx_he_cb);
- /** \brief Check if the mesh is empty. */
- inline bool
- empty () const
- {
- return (this->emptyVertices () && this->emptyEdges () && this->emptyFaces ());
- }
+ while (!this->isBoundary(circ.getTargetIndex())) {
+ delete_faces_face_.push_back(this->getFaceIndex((circ++).getTargetIndex()));
- /** \brief Check if the vertices are empty. */
- inline bool
- emptyVertices () const
+#ifdef PCL_GEOMETRY_MESH_BASE_TEST_DELETE_FACE_MANIFOLD_2
+ if (circ == this->getIncomingHalfEdgeAroundVertexCirculator(
+ idx_he_cb)) // Abort infinite loop
{
- return (vertices_.empty ());
+ // In a manifold mesh we can't invalidate the face while reconnecting!
+ // See the implementation of
+ // deleteFace (const FaceIndex& idx_face,
+ // std::false_type /*is_manifold*/)
+ pcl::geometry::g_pcl_geometry_mesh_base_test_delete_face_manifold_2_success =
+ false;
+ return;
}
+#endif
+ }
+ }
+ else {
+ this->setOutgoingHalfEdgeIndex(idx_v_b, idx_he_bc);
+ }
+ }
+
+ /** \brief Both edges are not on the boundary. Non-manifold version. */
+ void
+ reconnectNBNB(const HalfEdgeIndex& idx_he_bc,
+ const HalfEdgeIndex& /*idx_he_cb*/,
+ const VertexIndex& idx_v_b,
+ std::false_type /*is_manifold*/)
+ {
+ if (!this->isBoundary(idx_v_b)) {
+ this->setOutgoingHalfEdgeIndex(idx_v_b, idx_he_bc);
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // markDeleted
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Mark the given vertex as deleted. */
+ inline void
+ markDeleted(const VertexIndex& idx_vertex)
+ {
+ assert(this->isValid(idx_vertex));
+ this->getVertex(idx_vertex).idx_outgoing_half_edge_.invalidate();
+ }
- /** \brief Check if the edges are empty. */
- inline bool
- emptyEdges () const
- {
- return (half_edges_.empty ());
- }
+ /** \brief Mark the given half-edge as deleted. */
+ inline void
+ markDeleted(const HalfEdgeIndex& idx_he)
+ {
+ assert(this->isValid(idx_he));
+ this->getHalfEdge(idx_he).idx_terminating_vertex_.invalidate();
+ }
- /** \brief Check if the faces are empty. */
- inline bool
- emptyFaces () const
- {
- return (faces_.empty ());
- }
+ /** \brief Mark the given edge (both half-edges) as deleted. */
+ inline void
+ markDeleted(const EdgeIndex& idx_edge)
+ {
+ assert(this->isValid(idx_edge));
+ this->markDeleted(pcl::geometry::toHalfEdgeIndex(idx_edge, true));
+ this->markDeleted(pcl::geometry::toHalfEdgeIndex(idx_edge, false));
+ }
+
+ /** \brief Mark the given face as deleted. */
+ inline void
+ markDeleted(const FaceIndex& idx_face)
+ {
+ assert(this->isValid(idx_face));
+ this->getFace(idx_face).idx_inner_half_edge_.invalidate();
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // For cleanUp
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Removes mesh elements and data that are marked as deleted from the
+ * container.
+ * \tparam ElementContainerT e.g. std::vector \<Vertex\>
+ * \tparam DataContainerT e.g. std::vector \<VertexData\>
+ * \tparam IndexContainerT e.g. std::vector \<VertexIndex\>
+ * \tparam HasDataT Integral constant specifying if the mesh has data
+ * associated with the elements.
+ * \param[in, out] elements Container for the mesh elements. Resized to the new size.
+ * \param[in, out] data_cloud Container for the mesh data. Resized to the new size.
+ * \return Container with the same size as the old input data. For each non-deleted
+ * element it holds the index of that element after the removal, and an invalid
+ * index if the element was deleted.
+ */
+ template <class ElementContainerT,
+ class DataContainerT,
+ class IndexContainerT,
+ class HasDataT>
+ IndexContainerT
+ remove(ElementContainerT& elements, DataContainerT& data_cloud)
+ {
+ using Index = typename IndexContainerT::value_type;
+ using Element = typename ElementContainerT::value_type;
+
+ if (HasDataT::value)
+ assert(elements.size() == data_cloud.size());
+ else
+ assert(data_cloud.empty()); // Bug in this class!
+
+ IndexContainerT new_indices(elements.size(),
+ typename IndexContainerT::value_type());
+ Index ind_old(0), ind_new(0);
+
+ typename ElementContainerT::const_iterator it_e_old = elements.begin();
+ typename ElementContainerT::iterator it_e_new = elements.begin();
+
+ typename DataContainerT::const_iterator it_d_old = data_cloud.begin();
+ typename DataContainerT::iterator it_d_new = data_cloud.begin();
+
+ typename IndexContainerT::iterator it_ind_new = new_indices.begin();
+ typename IndexContainerT::const_iterator it_ind_new_end = new_indices.end();
+
+ while (it_ind_new != it_ind_new_end) {
+ if (!this->isDeleted(ind_old)) {
+ *it_ind_new = ind_new++;
+
+ // TODO: Test for self assignment?
+ *it_e_new++ = *it_e_old;
+ this->assignIf(it_d_old, it_d_new, HasDataT());
+ this->incrementIf(it_d_new, HasDataT());
+ }
+ ++ind_old;
+ ++it_e_old;
+ this->incrementIf(it_d_old, HasDataT());
+ ++it_ind_new;
+ }
+
+ elements.resize(ind_new.get(), Element());
+ if (HasDataT::value) {
+ data_cloud.resize(ind_new.get());
+ }
+ else if (it_d_old != data_cloud.begin() || it_d_new != data_cloud.begin()) {
+ std::cerr << "TODO: Bug in MeshBase::remove!\n";
+ assert(false);
+ exit(EXIT_FAILURE);
+ }
+
+ return (new_indices);
+ }
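From the caller's point of view this remapping is what the public cleanUp() builds on: after deleting elements, previously stored indices can shift or become invalid, so they should be re-acquired; a sketch reusing the illustrative TriMesh typedef from above:

void deleteAndCompact(TriMesh& mesh, const TriMesh::FaceIndex& face)
{
  mesh.deleteFace(face); // marks the face (and elements that become unused) as deleted
  mesh.cleanUp();        // removes the marked elements; indices stored before this call are stale

  // Query the mesh again instead of reusing pre-cleanUp indices.
  const std::size_t remaining_faces = mesh.sizeFaces();
  (void)remaining_faces;
}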
+
+ /** \brief Increment the iterator. */
+ template <class IteratorT>
+ inline void
+ incrementIf(IteratorT& it, std::true_type /*has_data*/) const
+ {
+ ++it;
+ }
+
+ /** \brief Does nothing. */
+ template <class IteratorT>
+ inline void
+ incrementIf(IteratorT& /*it*/, std::false_type /*has_data*/) const
+ {}
+
+ /** \brief Assign the source iterator to the target iterator. */
+ template <class ConstIteratorT, class IteratorT>
+ inline void
+ assignIf(const ConstIteratorT source,
+ IteratorT target,
+ std::true_type /*has_data*/) const
+ {
+ *target = *source;
+ }
+
+ /** \brief Does nothing. */
+ template <class ConstIteratorT, class IteratorT>
+ inline void
+ assignIf(const ConstIteratorT /*source*/,
+ IteratorT /*target*/,
+ std::false_type /*has_data*/) const
+ {}
+
+ ////////////////////////////////////////////////////////////////////////
+ // Vertex / Half-edge / Face connectivity
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Set the outgoing half-edge index to a given vertex. */
+ inline void
+ setOutgoingHalfEdgeIndex(const VertexIndex& idx_vertex,
+ const HalfEdgeIndex& idx_outgoing_half_edge)
+ {
+ assert(this->isValid(idx_vertex));
+ this->getVertex(idx_vertex).idx_outgoing_half_edge_ = idx_outgoing_half_edge;
+ }
+
+ /** \brief Set the terminating vertex index to a given half-edge. */
+ inline void
+ setTerminatingVertexIndex(const HalfEdgeIndex& idx_half_edge,
+ const VertexIndex& idx_terminating_vertex)
+ {
+ assert(this->isValid(idx_half_edge));
+ this->getHalfEdge(idx_half_edge).idx_terminating_vertex_ = idx_terminating_vertex;
+ }
+
+ /** \brief Set the next half_edge index to a given half-edge. */
+ inline void
+ setNextHalfEdgeIndex(const HalfEdgeIndex& idx_half_edge,
+ const HalfEdgeIndex& idx_next_half_edge)
+ {
+ assert(this->isValid(idx_half_edge));
+ this->getHalfEdge(idx_half_edge).idx_next_half_edge_ = idx_next_half_edge;
+ }
+
+ /** \brief Set the previous half-edge index to a given half-edge. */
+ inline void
+ setPrevHalfEdgeIndex(const HalfEdgeIndex& idx_half_edge,
+ const HalfEdgeIndex& idx_prev_half_edge)
+ {
+ assert(this->isValid(idx_half_edge));
+ this->getHalfEdge(idx_half_edge).idx_prev_half_edge_ = idx_prev_half_edge;
+ }
- ////////////////////////////////////////////////////////////////////////
- // reserve
- ////////////////////////////////////////////////////////////////////////
+ /** \brief Set the face index to a given half-edge. */
+ inline void
+ setFaceIndex(const HalfEdgeIndex& idx_half_edge, const FaceIndex& idx_face)
+ {
+ assert(this->isValid(idx_half_edge));
+ this->getHalfEdge(idx_half_edge).idx_face_ = idx_face;
+ }
+
+ /** \brief Set the inner half-edge index to a given face. */
+ inline void
+ setInnerHalfEdgeIndex(const FaceIndex& idx_face,
+ const HalfEdgeIndex& idx_inner_half_edge)
+ {
+ assert(this->isValid(idx_face));
+ this->getFace(idx_face).idx_inner_half_edge_ = idx_inner_half_edge;
+ }
- /** \brief Reserve storage space n vertices. */
- inline void
- reserveVertices (const std::size_t n)
- {
- vertices_.reserve (n);
- this->reserveData (vertex_data_cloud_, n, HasVertexData ());
- }
+ ////////////////////////////////////////////////////////////////////////
+ // isBoundary / isManifold
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Reserve storage space for n edges (2*n storage space is reserved for the half-edges). */
- inline void
- reserveEdges (const std::size_t n)
- {
- half_edges_.reserve (2*n);
- this->reserveData (half_edge_data_cloud_, 2*n, HasHalfEdgeData ());
- this->reserveData (edge_data_cloud_ , n, HasEdgeData ());
- }
+ /** \brief Check if any vertex of the face lies on the boundary. */
+ bool
+ isBoundary(const FaceIndex& idx_face, std::true_type /*check_vertices*/) const
+ {
+ VertexAroundFaceCirculator circ = this->getVertexAroundFaceCirculator(idx_face);
+ const VertexAroundFaceCirculator circ_end = circ;
- /** \brief Reserve storage space for n faces. */
- inline void
- reserveFaces (const std::size_t n)
- {
- faces_.reserve (n);
- this->reserveData (face_data_cloud_, n, HasFaceData ());
- }
-
- ////////////////////////////////////////////////////////////////////////
- // resize
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Resize the the vertices to n elements. */
- inline void
- resizeVertices (const std::size_t n, const VertexData& data = VertexData ())
- {
- vertices_.resize (n);
- this->resizeData (vertex_data_cloud_, n, data, HasVertexData ());
- }
-
- /** \brief Resize the edges to n elements (half-edges will hold 2*n elements). */
- inline void
- resizeEdges (const std::size_t n,
- const EdgeData& edge_data = EdgeData (),
- const HalfEdgeData he_data = HalfEdgeData ())
- {
- half_edges_.resize (2*n);
- this->resizeData (half_edge_data_cloud_, 2*n, he_data , HasHalfEdgeData ());
- this->resizeData (edge_data_cloud_ , n, edge_data, HasEdgeData ());
- }
-
- /** \brief Resize the faces to n elements. */
- inline void
- resizeFaces (const std::size_t n, const FaceData& data = FaceData ())
- {
- faces_.resize (n);
- this->resizeData (face_data_cloud_, n, data, HasFaceData ());
- }
-
- ////////////////////////////////////////////////////////////////////////
- // clear
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Clear all mesh elements and data. */
- void
- clear ()
- {
- vertices_.clear ();
- half_edges_.clear ();
- faces_.clear ();
-
- this->clearData (vertex_data_cloud_ , HasVertexData ());
- this->clearData (half_edge_data_cloud_, HasHalfEdgeData ());
- this->clearData (edge_data_cloud_ , HasEdgeData ());
- this->clearData (face_data_cloud_ , HasFaceData ());
- }
-
- ////////////////////////////////////////////////////////////////////////
- // get / set the vertex data cloud
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Get access to the stored vertex data.
- * \warning Please make sure to NOT add or remove elements from the cloud.
- */
- inline VertexDataCloud&
- getVertexDataCloud ()
- {
- return (vertex_data_cloud_);
- }
-
- /** \brief Get the stored vertex data. */
- inline VertexDataCloud
- getVertexDataCloud () const
- {
- return (vertex_data_cloud_);
- }
-
- /** \brief Change the stored vertex data.
- * \param[in] vertex_data_cloud The new vertex data. Must be the same as the current data.
- * \return true if the cloud could be set.
- */
- inline bool
- setVertexDataCloud (const VertexDataCloud& vertex_data_cloud)
- {
- if (vertex_data_cloud.size () == vertex_data_cloud_.size ())
- {
- vertex_data_cloud_ = vertex_data_cloud;
- return (true);
- }
- return (false);
- }
-
- ////////////////////////////////////////////////////////////////////////
- // get / set the half-edge data cloud
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Get access to the stored half-edge data.
- * \warning Please make sure to NOT add or remove elements from the cloud.
- */
- inline HalfEdgeDataCloud&
- getHalfEdgeDataCloud ()
- {
- return (half_edge_data_cloud_);
- }
-
- /** \brief Get the stored half-edge data. */
- inline HalfEdgeDataCloud
- getHalfEdgeDataCloud () const
- {
- return (half_edge_data_cloud_);
- }
-
- /** \brief Change the stored half-edge data.
- * \param[in] half_edge_data_cloud The new half-edge data. Must be the same as the current data.
- * \return true if the cloud could be set.
- */
- inline bool
- setHalfEdgeDataCloud (const HalfEdgeDataCloud& half_edge_data_cloud)
- {
- if (half_edge_data_cloud.size () == half_edge_data_cloud_.size ())
- {
- half_edge_data_cloud_ = half_edge_data_cloud;
- return (true);
- }
- return (false);
- }
-
- ////////////////////////////////////////////////////////////////////////
- // get / set the edge data cloud
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Get access to the stored edge data.
- * \warning Please make sure to NOT add or remove elements from the cloud.
- */
- inline EdgeDataCloud&
- getEdgeDataCloud ()
- {
- return (edge_data_cloud_);
- }
-
- /** \brief Get the stored edge data. */
- inline EdgeDataCloud
- getEdgeDataCloud () const
- {
- return (edge_data_cloud_);
- }
-
- /** \brief Change the stored edge data.
- * \param[in] edge_data_cloud The new edge data. Must be the same as the current data.
- * \return true if the cloud could be set.
- */
- inline bool
- setEdgeDataCloud (const EdgeDataCloud& edge_data_cloud)
- {
- if (edge_data_cloud.size () == edge_data_cloud_.size ())
- {
- edge_data_cloud_ = edge_data_cloud;
- return (true);
- }
- return (false);
- }
-
- ////////////////////////////////////////////////////////////////////////
- // get / set the face data cloud
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Get access to the stored face data.
- * \warning Please make sure to NOT add or remove elements from the cloud.
- */
- inline FaceDataCloud&
- getFaceDataCloud ()
- {
- return (face_data_cloud_);
- }
-
- /** \brief Get the stored face data. */
- inline FaceDataCloud
- getFaceDataCloud () const
- {
- return (face_data_cloud_);
- }
-
- /** \brief Change the stored face data.
- * \param[in] face_data_cloud The new face data. Must be the same as the current data.
- * \return true if the cloud could be set.
- */
- inline bool
- setFaceDataCloud (const FaceDataCloud& face_data_cloud)
- {
- if (face_data_cloud.size () == face_data_cloud_.size ())
- {
- face_data_cloud_ = face_data_cloud;
- return (true);
- }
- return (false);
- }
-
- ////////////////////////////////////////////////////////////////////////
- // getVertexIndex / getHalfEdgeIndex / getEdgeIndex / getFaceIndex
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Get the index associated to the given vertex data.
- * \return Invalid index if the mesh does not have associated vertex data.
- */
- inline VertexIndex
- getVertexIndex (const VertexData& vertex_data) const
- {
- if (HasVertexData::value)
- {
- assert (&vertex_data >= &vertex_data_cloud_.front () && &vertex_data <= &vertex_data_cloud_.back ());
- return (VertexIndex (std::distance (&vertex_data_cloud_.front (), &vertex_data)));
- }
- return (VertexIndex ());
- }
-
- /** \brief Get the index associated to the given half-edge data. */
- inline HalfEdgeIndex
- getHalfEdgeIndex (const HalfEdgeData& half_edge_data) const
- {
- if (HasHalfEdgeData::value)
- {
- assert (&half_edge_data >= &half_edge_data_cloud_.front () && &half_edge_data <= &half_edge_data_cloud_.back ());
- return (HalfEdgeIndex (std::distance (&half_edge_data_cloud_.front (), &half_edge_data)));
- }
- return (HalfEdgeIndex ());
- }
-
- /** \brief Get the index associated to the given edge data. */
- inline EdgeIndex
- getEdgeIndex (const EdgeData& edge_data) const
- {
- if (HasEdgeData::value)
- {
- assert (&edge_data >= &edge_data_cloud_.front () && &edge_data <= &edge_data_cloud_.back ());
- return (EdgeIndex (std::distance (&edge_data_cloud_.front (), &edge_data)));
- }
- return (EdgeIndex ());
- }
-
- /** \brief Get the index associated to the given face data. */
- inline FaceIndex
- getFaceIndex (const FaceData& face_data) const
- {
- if (HasFaceData::value)
- {
- assert (&face_data >= &face_data_cloud_.front () && &face_data <= &face_data_cloud_.back ());
- return (FaceIndex (std::distance (&face_data_cloud_.front (), &face_data)));
- }
- return (FaceIndex ());
- }
-
- protected:
-
- ////////////////////////////////////////////////////////////////////////
- // Types
- ////////////////////////////////////////////////////////////////////////
+ do {
+ if (this->isBoundary(circ.getTargetIndex())) {
+ return (true);
+ }
+ } while (++circ != circ_end);
- // Elements
- using Vertex = pcl::geometry::Vertex;
- using HalfEdge = pcl::geometry::HalfEdge;
- using Face = pcl::geometry::Face;
-
- using Vertices = std::vector<Vertex>;
- using HalfEdges = std::vector<HalfEdge>;
- using Faces = std::vector<Face>;
-
- using VertexIterator = typename Vertices::iterator;
- using HalfEdgeIterator = typename HalfEdges::iterator;
- using FaceIterator = typename Faces::iterator;
-
- using VertexConstIterator = typename Vertices::const_iterator;
- using HalfEdgeConstIterator = typename HalfEdges::const_iterator;
- using FaceConstIterator = typename Faces::const_iterator;
-
- /** \brief General implementation of addFace. */
- FaceIndex
- addFaceImplBase (const VertexIndices& vertices,
- const FaceData& face_data,
- const EdgeData& edge_data,
- const HalfEdgeData& half_edge_data)
- {
- const int n = static_cast<int> (vertices.size ());
- if (n < 3) return (FaceIndex ());
-
- // Check for topological errors
- inner_he_.resize (n);
- free_he_.resize (n);
- is_new_.resize (n);
- make_adjacent_.resize (n);
- for (int i=0; i<n; ++i)
- {
- if (!this->checkTopology1 (vertices [i], vertices [(i+1)%n], inner_he_ [i], is_new_ [i], IsManifold ()))
- {
- return (FaceIndex ());
- }
- }
- for (int i=0; i<n; ++i)
- {
- int j = (i+1)%n;
- if (!this->checkTopology2 (inner_he_ [i], inner_he_ [j], is_new_ [i], is_new_ [j], this->isIsolated (vertices [j]), make_adjacent_ [i], free_he_ [i], IsManifold ()))
- {
- return (FaceIndex ());
- }
- }
-
- // Reconnect the existing half-edges if needed
- if (!IsManifold::value)
- {
- for (int i=0; i<n; ++i)
- {
- if (make_adjacent_ [i])
- {
- this->makeAdjacent (inner_he_ [i], inner_he_ [(i+1)%n], free_he_ [i]);
- }
- }
- }
-
- // Add new half-edges if needed
- for (int i=0; i<n; ++i)
- {
- if (is_new_ [i])
- {
- inner_he_ [i] = this->addEdge (vertices [i], vertices [(i+1)%n], half_edge_data, edge_data);
- }
- }
-
- // Connect
- for (int i=0; i<n; ++i)
- {
- int j = (i+1)%n;
- if ( is_new_ [i] && is_new_ [j]) this->connectNewNew (inner_he_ [i], inner_he_ [j], vertices [j], IsManifold ());
- else if ( is_new_ [i] && !is_new_ [j]) this->connectNewOld (inner_he_ [i], inner_he_ [j], vertices [j]);
- else if (!is_new_ [i] && is_new_ [j]) this->connectOldNew (inner_he_ [i], inner_he_ [j], vertices [j]);
- else this->connectOldOld (inner_he_ [i], inner_he_ [j], vertices [j], IsManifold ());
- }
- return (this->connectFace (inner_he_, face_data));
- }
-
- ////////////////////////////////////////////////////////////////////////
- // addEdge
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Add an edge between the two given vertices and connect them with the vertices.
- * \param[in] idx_v_a The first vertex index
- * \param[in] idx_v_b The second vertex index
- * \param[in] he_data Data associated with the half-edges. This is only added if the mesh has data associated with the half-edges.
- * \param[in] edge_data Data associated with the edge. This is only added if the mesh has data associated with the edges.
- * \return Index to the half-edge from vertex a to vertex b.
- */
- HalfEdgeIndex
- addEdge (const VertexIndex& idx_v_a,
- const VertexIndex& idx_v_b,
- const HalfEdgeData& he_data,
- const EdgeData& edge_data)
- {
- half_edges_.push_back (HalfEdge (idx_v_b));
- half_edges_.push_back (HalfEdge (idx_v_a));
-
- this->addData (half_edge_data_cloud_, he_data , HasHalfEdgeData ());
- this->addData (half_edge_data_cloud_, he_data , HasHalfEdgeData ());
- this->addData (edge_data_cloud_ , edge_data, HasEdgeData ());
-
- return (HalfEdgeIndex (static_cast <int> (half_edges_.size () - 2)));
- }
-
- ////////////////////////////////////////////////////////////////////////
- // topology checks
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Check if the edge between the two vertices can be added.
- * \param[in] idx_v_a Index to the first vertex.
- * \param[in] idx_v_b Index to the second vertex.
- * \param[out] idx_he_ab Index to the half-edge ab if is_new_ab=false.
- * \param[out] is_new_ab true if the edge between the vertices exists already. Must be initialized with true!
- * \return true if the half-edge may be added.
- */
- bool
- checkTopology1 (const VertexIndex& idx_v_a,
- const VertexIndex& idx_v_b,
- HalfEdgeIndex& idx_he_ab,
- std::vector <bool>::reference is_new_ab,
- std::true_type /*is_manifold*/) const
- {
- is_new_ab = true;
- if (this->isIsolated (idx_v_a)) return (true);
-
- idx_he_ab = this->getOutgoingHalfEdgeIndex (idx_v_a);
-
- if (!this->isBoundary (idx_he_ab)) return (false);
- if (this->getTerminatingVertexIndex (idx_he_ab) == idx_v_b) is_new_ab = false;
- return (true);
- }
-
- /** \brief Non manifold version of checkTopology1 */
- bool
- checkTopology1 (const VertexIndex& idx_v_a,
- const VertexIndex& idx_v_b,
- HalfEdgeIndex& idx_he_ab,
- std::vector <bool>::reference is_new_ab,
- std::false_type /*is_manifold*/) const
- {
- is_new_ab = true;
- if (this->isIsolated (idx_v_a)) return (true);
- if (!this->isBoundary (this->getOutgoingHalfEdgeIndex (idx_v_a))) return (false);
-
- VertexAroundVertexCirculator circ = this->getVertexAroundVertexCirculator (this->getOutgoingHalfEdgeIndex (idx_v_a));
- const VertexAroundVertexCirculator circ_end = circ;
-
- do
- {
- if (circ.getTargetIndex () == idx_v_b)
- {
- idx_he_ab = circ.getCurrentHalfEdgeIndex ();
- if (!this->isBoundary (idx_he_ab)) return (false);
-
- is_new_ab = false;
- return (true);
- }
- } while (++circ!=circ_end);
-
- return (true);
- }
-
- /** \brief Check if the face may be added (mesh does not become non-manifold). */
- inline bool
- checkTopology2 (const HalfEdgeIndex& /*idx_he_ab*/,
- const HalfEdgeIndex& /*idx_he_bc*/,
- const bool is_new_ab,
- const bool is_new_bc,
- const bool is_isolated_b,
- std::vector <bool>::reference /*make_adjacent_ab_bc*/,
- HalfEdgeIndex& /*idx_free_half_edge*/,
- std::true_type /*is_manifold*/) const
- {
- return !(is_new_ab && is_new_bc && !is_isolated_b);
- }
-
- /** \brief Check if the half-edge bc is the next half-edge of ab.
- * \param[in] idx_he_ab Index to the half-edge between the vertices a and b.
- * \param[in] idx_he_bc Index to the half-edge between the vertices b and c.
- * \param[in] is_new_ab Half-edge ab is new.
- * \param[in] is_new_bc Half-edge bc is new.
- * \param[out] make_adjacent_ab_bc Half-edges ab and bc need to be made adjacent.
- * \param[out] idx_free_half_edge Free half-edge (needed for makeAdjacent)
- * \return true if addFace may be continued.
- */
- inline bool
- checkTopology2 (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_bc,
- const bool is_new_ab,
- const bool is_new_bc,
- const bool /*is_isolated_b*/,
- std::vector <bool>::reference make_adjacent_ab_bc,
- HalfEdgeIndex& idx_free_half_edge,
- std::false_type /*is_manifold*/) const
- {
- if (is_new_ab || is_new_bc)
- {
- make_adjacent_ab_bc = false;
- return (true); // Make adjacent is only needed for two old half-edges
- }
-
- if (this->getNextHalfEdgeIndex (idx_he_ab) == idx_he_bc)
- {
- make_adjacent_ab_bc = false;
- return (true); // already adjacent
- }
-
- make_adjacent_ab_bc = true;
-
- // Find the next boundary half edge
- IncomingHalfEdgeAroundVertexCirculator circ = this->getIncomingHalfEdgeAroundVertexCirculator (this->getOppositeHalfEdgeIndex (idx_he_bc));
-
- do ++circ; while (!this->isBoundary (circ.getTargetIndex ()));
- idx_free_half_edge = circ.getTargetIndex ();
-
- // This would detach the faces around the vertex from each other.
- return (circ.getTargetIndex () != idx_he_ab);
- }
-
- /** \brief Make the half-edges bc the next half-edge of ab.
- * \param[in] idx_he_ab Index to the half-edge between the vertices a and b.
- * \param[in] idx_he_bc Index to the half-edge between the vertices b and c.
- * \param[in, out] idx_free_half_edge Free half-edge needed to re-connect the half-edges around vertex b.
- */
- void
- makeAdjacent (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_bc,
- HalfEdgeIndex& idx_free_half_edge)
- {
- // Re-link. No references!
- const HalfEdgeIndex idx_he_ab_next = this->getNextHalfEdgeIndex (idx_he_ab);
- const HalfEdgeIndex idx_he_bc_prev = this->getPrevHalfEdgeIndex (idx_he_bc);
- const HalfEdgeIndex idx_he_free_next = this->getNextHalfEdgeIndex (idx_free_half_edge);
-
- this->connectPrevNext (idx_he_ab, idx_he_bc);
- this->connectPrevNext (idx_free_half_edge, idx_he_ab_next);
- this->connectPrevNext (idx_he_bc_prev, idx_he_free_next);
- }
-
- ////////////////////////////////////////////////////////////////////////
- // connect
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Add a face to the mesh and connect it to the half-edges.
- * \param[in] inner_he Inner half-edges of the face.
- * \param[in] face_data Data that is stored in the face. This is only added if the mesh has data associated with the faces.
- * \return Index to the new face.
- */
- FaceIndex
- connectFace (const HalfEdgeIndices& inner_he,
- const FaceData& face_data)
- {
- faces_.push_back (Face (inner_he.back ()));
- this->addData (face_data_cloud_, face_data, HasFaceData ());
-
- const FaceIndex idx_face (static_cast <int> (this->sizeFaces () - 1));
-
- for (const auto &idx_half_edge : inner_he)
- {
- this->setFaceIndex (idx_half_edge, idx_face);
- }
-
- return (idx_face);
- }
-
- /** \brief Connect the next and prev indices of the two half-edges with each other. */
- inline void
- connectPrevNext (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_bc)
- {
- this->setNextHalfEdgeIndex (idx_he_ab, idx_he_bc);
- this->setPrevHalfEdgeIndex (idx_he_bc, idx_he_ab);
- }
-
- /** \brief Both half-edges are new (manifold version). */
- void
- connectNewNew (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_bc,
- const VertexIndex& idx_v_b,
- std::true_type /*is_manifold*/)
- {
- const HalfEdgeIndex idx_he_ba = this->getOppositeHalfEdgeIndex (idx_he_ab);
- const HalfEdgeIndex idx_he_cb = this->getOppositeHalfEdgeIndex (idx_he_bc);
-
- this->connectPrevNext (idx_he_ab, idx_he_bc);
- this->connectPrevNext (idx_he_cb, idx_he_ba);
-
- this->setOutgoingHalfEdgeIndex (idx_v_b, idx_he_ba);
- }
-
- /** \brief Both half-edges are new (non-manifold version). */
- void
- connectNewNew (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_bc,
- const VertexIndex& idx_v_b,
- std::false_type /*is_manifold*/)
- {
- if (this->isIsolated (idx_v_b))
- {
- this->connectNewNew (idx_he_ab, idx_he_bc, idx_v_b, std::true_type ());
- }
- else
- {
- const HalfEdgeIndex idx_he_ba = this->getOppositeHalfEdgeIndex (idx_he_ab);
- const HalfEdgeIndex idx_he_cb = this->getOppositeHalfEdgeIndex (idx_he_bc);
-
- // No references!
- const HalfEdgeIndex idx_he_b_out = this->getOutgoingHalfEdgeIndex (idx_v_b);
- const HalfEdgeIndex idx_he_b_out_prev = this->getPrevHalfEdgeIndex (idx_he_b_out);
-
- this->connectPrevNext (idx_he_ab, idx_he_bc);
- this->connectPrevNext (idx_he_cb, idx_he_b_out);
- this->connectPrevNext (idx_he_b_out_prev, idx_he_ba);
- }
- }
-
- /** \brief The first half-edge is new. */
- void
- connectNewOld (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_bc,
- const VertexIndex& idx_v_b)
- {
- const HalfEdgeIndex idx_he_ba = this->getOppositeHalfEdgeIndex (idx_he_ab);
- const HalfEdgeIndex idx_he_bc_prev = this->getPrevHalfEdgeIndex (idx_he_bc); // No reference!
-
- this->connectPrevNext (idx_he_ab, idx_he_bc);
- this->connectPrevNext (idx_he_bc_prev, idx_he_ba);
-
- this->setOutgoingHalfEdgeIndex (idx_v_b, idx_he_ba);
- }
-
- /** \brief The second half-edge is new. */
- void
- connectOldNew (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_bc,
- const VertexIndex& idx_v_b)
- {
- const HalfEdgeIndex idx_he_cb = this->getOppositeHalfEdgeIndex (idx_he_bc);
- const HalfEdgeIndex idx_he_ab_next = this->getNextHalfEdgeIndex (idx_he_ab); // No reference!
-
- this->connectPrevNext (idx_he_ab, idx_he_bc);
- this->connectPrevNext (idx_he_cb, idx_he_ab_next);
-
- this->setOutgoingHalfEdgeIndex (idx_v_b, idx_he_ab_next);
- }
-
- /** \brief Both half-edges are old (manifold version). */
- void
- connectOldOld (const HalfEdgeIndex& /*idx_he_ab*/,
- const HalfEdgeIndex& /*idx_he_bc*/,
- const VertexIndex& /*idx_v_b*/,
- std::true_type /*is_manifold*/)
- {
- }
-
- /** \brief Both half-edges are old (non-manifold version). */
- void
- connectOldOld (const HalfEdgeIndex& /*idx_he_ab*/,
- const HalfEdgeIndex& idx_he_bc,
- const VertexIndex& idx_v_b,
- std::false_type /*is_manifold*/)
- {
- const HalfEdgeIndex& idx_he_b_out = this->getOutgoingHalfEdgeIndex (idx_v_b);
-
- // The outgoing half edge MUST be a boundary half-edge (if there is one)
- if (idx_he_b_out == idx_he_bc) // he_bc is no longer on the boundary
- {
- OutgoingHalfEdgeAroundVertexCirculator circ = this->getOutgoingHalfEdgeAroundVertexCirculator (idx_he_b_out);
- const OutgoingHalfEdgeAroundVertexCirculator circ_end = circ;
-
- while (++circ!=circ_end)
- {
- if (this->isBoundary (circ.getTargetIndex ()))
- {
- this->setOutgoingHalfEdgeIndex (idx_v_b, circ.getTargetIndex ());
- return;
- }
- }
- }
- }
-
- ////////////////////////////////////////////////////////////////////////
- // addData
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Add mesh data. */
- template <class DataT>
- inline void
- addData (pcl::PointCloud <DataT>& cloud, const DataT& data, std::true_type /*has_data*/)
- {
- cloud.push_back (data);
- }
-
- /** \brief Does nothing. */
- template <class DataT>
- inline void
- addData (pcl::PointCloud <DataT>& /*cloud*/, const DataT& /*data*/, std::false_type /*has_data*/)
- {
- }
+ return (false);
+ }
- ////////////////////////////////////////////////////////////////////////
- // deleteFace
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Manifold version of deleteFace. If the mesh becomes non-manifold due to the delete operation the faces around the non-manifold vertex are deleted until the mesh becomes manifold again. */
- void
- deleteFace (const FaceIndex& idx_face,
- std::true_type /*is_manifold*/)
- {
- assert (this->isValid (idx_face));
- delete_faces_face_.clear ();
- delete_faces_face_.push_back (idx_face);
-
- while (!delete_faces_face_.empty ())
- {
- const FaceIndex idx_face_cur = delete_faces_face_.back ();
- delete_faces_face_.pop_back ();
-
- // This calls the non-manifold version of deleteFace, which will call the manifold version of reconnect.
- this->deleteFace (idx_face_cur, std::false_type ());
- }
- }
-
- /** \brief Non-manifold version of deleteFace. */
- void
- deleteFace (const FaceIndex& idx_face,
- std::false_type /*is_manifold*/)
- {
- assert (this->isValid (idx_face));
- if (this->isDeleted (idx_face)) return;
-
- // Store all half-edges in the face
- inner_he_.clear ();
- is_boundary_.clear ();
- InnerHalfEdgeAroundFaceCirculator circ = this->getInnerHalfEdgeAroundFaceCirculator (idx_face);
- const InnerHalfEdgeAroundFaceCirculator circ_end = circ;
- do
- {
- inner_he_.push_back (circ.getTargetIndex ());
- is_boundary_.push_back (this->isBoundary (this->getOppositeHalfEdgeIndex (circ.getTargetIndex ())));
- } while (++circ != circ_end);
- assert (inner_he_.size () >= 3); // Minimum should be a triangle.
-
- const int n = static_cast <int> (inner_he_.size ());
- int j;
-
- if (IsManifold::value)
- {
- for (int i=0; i<n; ++i)
- {
- j = (i+1)%n;
- this->reconnect (inner_he_ [i], inner_he_ [j], is_boundary_ [i], is_boundary_ [j]);
- }
- for (int i=0; i<n; ++i)
- {
- this->getHalfEdge (inner_he_ [i]).idx_face_.invalidate ();
- }
- }
- else
- {
- for (int i=0; i<n; ++i)
- {
- j = (i+1)%n;
- this->reconnect (inner_he_ [i], inner_he_ [j], is_boundary_ [i], is_boundary_ [j]);
- this->getHalfEdge (inner_he_ [i]).idx_face_.invalidate ();
- }
- }
-
- this->markDeleted (idx_face);
- }
-
- ////////////////////////////////////////////////////////////////////////
- // reconnect
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Deconnect the input half-edges from the mesh and adjust the indices of the connected half-edges. */
- void
- reconnect (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_bc,
- const bool is_boundary_ba,
- const bool is_boundary_cb)
- {
- const HalfEdgeIndex idx_he_ba = this->getOppositeHalfEdgeIndex (idx_he_ab);
- const HalfEdgeIndex idx_he_cb = this->getOppositeHalfEdgeIndex (idx_he_bc);
- const VertexIndex idx_v_b = this->getTerminatingVertexIndex (idx_he_ab);
-
- if (is_boundary_ba && is_boundary_cb) // boundary - boundary
- {
- const HalfEdgeIndex& idx_he_cb_next = this->getNextHalfEdgeIndex (idx_he_cb);
-
- if (idx_he_cb_next == idx_he_ba) // Vertex b is isolated
- {
- this->markDeleted (idx_v_b);
- }
- else
- {
- this->connectPrevNext (this->getPrevHalfEdgeIndex (idx_he_ba), idx_he_cb_next);
- this->setOutgoingHalfEdgeIndex (idx_v_b, idx_he_cb_next);
- }
-
- this->markDeleted (idx_he_ab);
- this->markDeleted (idx_he_ba);
- }
- else if (is_boundary_ba && !is_boundary_cb) // boundary - no boundary
- {
- this->connectPrevNext (this->getPrevHalfEdgeIndex (idx_he_ba), idx_he_bc);
- this->setOutgoingHalfEdgeIndex (idx_v_b, idx_he_bc);
-
- this->markDeleted (idx_he_ab);
- this->markDeleted (idx_he_ba);
- }
- else if (!is_boundary_ba && is_boundary_cb) // no boundary - boundary
- {
- const HalfEdgeIndex& idx_he_cb_next = this->getNextHalfEdgeIndex (idx_he_cb);
- this->connectPrevNext (idx_he_ab, idx_he_cb_next);
- this->setOutgoingHalfEdgeIndex (idx_v_b, idx_he_cb_next);
- }
- else // no boundary - no boundary
- {
- this->reconnectNBNB (idx_he_bc, idx_he_cb, idx_v_b, IsManifold ());
- }
- }
-
- /** \brief Both edges are not on the boundary. Manifold version. */
- void
- reconnectNBNB (const HalfEdgeIndex& idx_he_bc,
- const HalfEdgeIndex& idx_he_cb,
- const VertexIndex& idx_v_b,
- std::true_type /*is_manifold*/)
- {
- if (this->isBoundary (idx_v_b))
- {
- // Deletion of this face makes the mesh non-manifold
- // -> delete the neighboring faces until it is manifold again
- IncomingHalfEdgeAroundVertexCirculator circ = this->getIncomingHalfEdgeAroundVertexCirculator (idx_he_cb);
-
- while (!this->isBoundary (circ.getTargetIndex ()))
- {
- delete_faces_face_.push_back (this->getFaceIndex ((circ++).getTargetIndex ()));
-
-#ifdef PCL_GEOMETRY_MESH_BASE_TEST_DELETE_FACE_MANIFOLD_2
- if (circ == this->getIncomingHalfEdgeAroundVertexCirculator (idx_he_cb)) // Abort infinity loop
- {
- // In a manifold mesh we can't invalidate the face while reconnecting!
- // See the implementation of
- // deleteFace (const FaceIndex& idx_face,
- // std::false_type /*is_manifold*/)
- pcl::geometry::g_pcl_geometry_mesh_base_test_delete_face_manifold_2_success = false;
- return;
- }
-#endif
- }
- }
- else
- {
- this->setOutgoingHalfEdgeIndex (idx_v_b, idx_he_bc);
- }
- }
-
- /** \brief Both edges are not on the boundary. Non-manifold version. */
- void
- reconnectNBNB (const HalfEdgeIndex& idx_he_bc,
- const HalfEdgeIndex& /*idx_he_cb*/,
- const VertexIndex& idx_v_b,
- std::false_type /*is_manifold*/)
- {
- if (!this->isBoundary (idx_v_b))
- {
- this->setOutgoingHalfEdgeIndex (idx_v_b, idx_he_bc);
- }
- }
-
- ////////////////////////////////////////////////////////////////////////
- // markDeleted
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Mark the given vertex as deleted. */
- inline void
- markDeleted (const VertexIndex& idx_vertex)
- {
- assert (this->isValid (idx_vertex));
- this->getVertex (idx_vertex).idx_outgoing_half_edge_.invalidate ();
- }
-
- /** \brief Mark the given half-edge as deleted. */
- inline void
- markDeleted (const HalfEdgeIndex& idx_he)
- {
- assert (this->isValid (idx_he));
- this->getHalfEdge (idx_he).idx_terminating_vertex_.invalidate ();
- }
-
- /** \brief Mark the given edge (both half-edges) as deleted. */
- inline void
- markDeleted (const EdgeIndex& idx_edge)
- {
- assert (this->isValid (idx_edge));
- this->markDeleted (pcl::geometry::toHalfEdgeIndex (idx_edge, true));
- this->markDeleted (pcl::geometry::toHalfEdgeIndex (idx_edge, false));
- }
-
- /** \brief Mark the given face as deleted. */
- inline void
- markDeleted (const FaceIndex& idx_face)
- {
- assert (this->isValid (idx_face));
- this->getFace (idx_face).idx_inner_half_edge_.invalidate ();
- }
-
- ////////////////////////////////////////////////////////////////////////
- // For cleanUp
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Removes mesh elements and data that are marked as deleted from the container.
- * \tparam ElementContainerT e.g. std::vector \<Vertex\>
- * \tparam DataContainerT e.g. std::vector \<VertexData\>
- * \tparam IndexContainerT e.g. std::vector \<VertexIndex\>
- * \tparam HasDataT Integral constant specifying if the mesh has data associated with the elements.
- *
- * \param[in, out] elements Container for the mesh elements. Resized to the new size.
- * \param[in, out] data_cloud Container for the mesh data. Resized to the new size.
- * \return Container with the same size as the old input data. Holds the indices to the new elements for each non-deleted element and an invalid index if it is deleted.
- */
- template <class ElementContainerT, class DataContainerT, class IndexContainerT, class HasDataT> IndexContainerT
- remove (ElementContainerT& elements, DataContainerT& data_cloud)
- {
- using Index = typename IndexContainerT::value_type;
- using Element = typename ElementContainerT::value_type;
-
- if (HasDataT::value) assert (elements.size () == data_cloud.size ());
- else assert (data_cloud.empty ()); // Bug in this class!
-
- IndexContainerT new_indices (elements.size (), typename IndexContainerT::value_type ());
- Index ind_old (0), ind_new (0);
-
- typename ElementContainerT::const_iterator it_e_old = elements.begin ();
- typename ElementContainerT::iterator it_e_new = elements.begin ();
-
- typename DataContainerT::const_iterator it_d_old = data_cloud.begin ();
- typename DataContainerT::iterator it_d_new = data_cloud.begin ();
-
- typename IndexContainerT::iterator it_ind_new = new_indices.begin ();
- typename IndexContainerT::const_iterator it_ind_new_end = new_indices.end ();
-
- while (it_ind_new!=it_ind_new_end)
- {
- if (!this->isDeleted (ind_old))
- {
- *it_ind_new = ind_new++;
-
- // TODO: Test for self assignment?
- *it_e_new++ = *it_e_old;
- this->assignIf (it_d_old, it_d_new, HasDataT ());
- this->incrementIf ( it_d_new, HasDataT ());
- }
- ++ind_old;
- ++it_e_old;
- this->incrementIf (it_d_old, HasDataT ());
- ++it_ind_new;
- }
-
- elements.resize (ind_new.get (), Element ());
- if (HasDataT::value)
- {
- data_cloud.resize (ind_new.get ());
- }
- else if (it_d_old != data_cloud.begin () || it_d_new != data_cloud.begin ())
- {
- std::cerr << "TODO: Bug in MeshBase::remove!\n";
- assert (false);
- exit (EXIT_FAILURE);
- }
-
- return (new_indices);
- }
-
- /** \brief Increment the iterator. */
- template <class IteratorT> inline void
- incrementIf (IteratorT& it, std::true_type /*has_data*/) const
- {
- ++it;
- }
-
- /** \brief Does nothing. */
- template <class IteratorT> inline void
- incrementIf (IteratorT& /*it*/, std::false_type /*has_data*/) const
- {
- }
-
- /** \brief Assign the source iterator to the target iterator. */
- template <class ConstIteratorT, class IteratorT> inline void
- assignIf (const ConstIteratorT source, IteratorT target, std::true_type /*has_data*/) const
- {
- *target = *source;
- }
-
- /** \brief Does nothing. */
- template <class ConstIteratorT, class IteratorT> inline void
- assignIf (const ConstIteratorT /*source*/, IteratorT /*target*/, std::false_type /*has_data*/) const
- {
- }
-
- ////////////////////////////////////////////////////////////////////////
- // Vertex / Half-edge / Face connectivity
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Set the outgoing half-edge index to a given vertex. */
- inline void
- setOutgoingHalfEdgeIndex (const VertexIndex& idx_vertex, const HalfEdgeIndex& idx_outgoing_half_edge)
- {
- assert (this->isValid (idx_vertex));
- this->getVertex (idx_vertex).idx_outgoing_half_edge_ = idx_outgoing_half_edge;
- }
-
- /** \brief Set the terminating vertex index to a given half-edge. */
- inline void
- setTerminatingVertexIndex (const HalfEdgeIndex& idx_half_edge, const VertexIndex& idx_terminating_vertex)
- {
- assert (this->isValid (idx_half_edge));
- this->getHalfEdge (idx_half_edge).idx_terminating_vertex_ = idx_terminating_vertex;
- }
-
- /** \brief Set the next half_edge index to a given half-edge. */
- inline void
- setNextHalfEdgeIndex (const HalfEdgeIndex& idx_half_edge, const HalfEdgeIndex& idx_next_half_edge)
- {
- assert (this->isValid (idx_half_edge));
- this->getHalfEdge (idx_half_edge).idx_next_half_edge_ = idx_next_half_edge;
- }
-
- /** \brief Set the previous half-edge index to a given half-edge. */
- inline void
- setPrevHalfEdgeIndex (const HalfEdgeIndex& idx_half_edge,
- const HalfEdgeIndex& idx_prev_half_edge)
- {
- assert (this->isValid (idx_half_edge));
- this->getHalfEdge (idx_half_edge).idx_prev_half_edge_ = idx_prev_half_edge;
- }
-
- /** \brief Set the face index to a given half-edge. */
- inline void
- setFaceIndex (const HalfEdgeIndex& idx_half_edge, const FaceIndex& idx_face)
- {
- assert (this->isValid (idx_half_edge));
- this->getHalfEdge (idx_half_edge).idx_face_ = idx_face;
- }
-
- /** \brief Set the inner half-edge index to a given face. */
- inline void
- setInnerHalfEdgeIndex (const FaceIndex& idx_face, const HalfEdgeIndex& idx_inner_half_edge)
- {
- assert (this->isValid (idx_face));
- this->getFace (idx_face).idx_inner_half_edge_ = idx_inner_half_edge;
- }
-
- ////////////////////////////////////////////////////////////////////////
- // isBoundary / isManifold
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Check if any vertex of the face lies on the boundary. */
- bool
- isBoundary (const FaceIndex& idx_face, std::true_type /*check_vertices*/) const
- {
- VertexAroundFaceCirculator circ = this->getVertexAroundFaceCirculator (idx_face);
- const VertexAroundFaceCirculator circ_end = circ;
-
- do
- {
- if (this->isBoundary (circ.getTargetIndex ()))
- {
- return (true);
- }
- } while (++circ!=circ_end);
-
- return (false);
- }
-
- /** \brief Check if any edge of the face lies on the boundary. */
- bool
- isBoundary (const FaceIndex& idx_face, std::false_type /*check_vertices*/) const
- {
- OuterHalfEdgeAroundFaceCirculator circ = this->getOuterHalfEdgeAroundFaceCirculator (idx_face);
- const OuterHalfEdgeAroundFaceCirculator circ_end = circ;
-
- do
- {
- if (this->isBoundary (circ.getTargetIndex ()))
- {
- return (true);
- }
- } while (++circ!=circ_end);
-
- return (false);
- }
-
- /** \brief Always manifold. */
- inline bool
- isManifold (const VertexIndex&, std::true_type /*is_manifold*/) const
- {
- return (true);
- }
-
- /** \brief Check if the given vertex is manifold. */
- bool
- isManifold (const VertexIndex& idx_vertex, std::false_type /*is_manifold*/) const
- {
- OutgoingHalfEdgeAroundVertexCirculator circ = this->getOutgoingHalfEdgeAroundVertexCirculator (idx_vertex);
- const OutgoingHalfEdgeAroundVertexCirculator circ_end = circ;
-
- if (!this->isBoundary ((circ++).getTargetIndex ())) return (true);
- do
- {
- if (this->isBoundary (circ.getTargetIndex ())) return (false);
- } while (++circ != circ_end);
-
- return (true);
- }
-
- /** \brief Always manifold. */
- inline bool
- isManifold (std::true_type /*is_manifold*/) const
- {
- return (true);
- }
-
- /** \brief Check if all vertices in the mesh are manifold. */
- bool
- isManifold (std::false_type /*is_manifold*/) const
- {
- for (std::size_t i=0; i<this->sizeVertices (); ++i)
- {
- if (!this->isManifold (VertexIndex (i))) return (false);
- }
- return (true);
- }
-
- ////////////////////////////////////////////////////////////////////////
- // reserveData / resizeData / clearData
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Reserve storage space for the mesh data. */
- template <class DataCloudT> inline void
- reserveData (DataCloudT& cloud, const std::size_t n, std::true_type /*has_data*/) const
- {
- cloud.reserve (n);
- }
-
- /** \brief Does nothing */
- template <class DataCloudT> inline void
- reserveData (DataCloudT& /*cloud*/, const std::size_t /*n*/, std::false_type /*has_data*/) const
- {
- }
-
- /** \brief Resize the mesh data. */
- template <class DataCloudT> inline void
- resizeData (DataCloudT& /*data_cloud*/, const std::size_t n, const typename DataCloudT::value_type& data, std::true_type /*has_data*/) const
- {
- data.resize (n, data);
- }
-
- /** \brief Does nothing. */
- template <class DataCloudT> inline void
- resizeData (DataCloudT& /*data_cloud*/, const std::size_t /*n*/, const typename DataCloudT::value_type& /*data*/, std::false_type /*has_data*/) const
- {
- }
-
- /** \brief Clear the mesh data. */
- template <class DataCloudT> inline void
- clearData (DataCloudT& cloud, std::true_type /*has_data*/) const
- {
- cloud.clear ();
- }
+ /** \brief Check if any edge of the face lies on the boundary. */
+ bool
+ isBoundary(const FaceIndex& idx_face, std::false_type /*check_vertices*/) const
+ {
+ OuterHalfEdgeAroundFaceCirculator circ =
+ this->getOuterHalfEdgeAroundFaceCirculator(idx_face);
+ const OuterHalfEdgeAroundFaceCirculator circ_end = circ;
+
+ do {
+ if (this->isBoundary(circ.getTargetIndex())) {
+ return (true);
+ }
+ } while (++circ != circ_end);
+
+ return (false);
+ }
+
+ /** \brief Always manifold. */
+ inline bool
+ isManifold(const VertexIndex&, std::true_type /*is_manifold*/) const
+ {
+ return (true);
+ }
- /** \brief Does nothing. */
- template <class DataCloudT> inline void
- clearData (DataCloudT& /*cloud*/, std::false_type /*has_data*/) const
- {
- }
+ /** \brief Check if the given vertex is manifold. */
+ bool
+ isManifold(const VertexIndex& idx_vertex, std::false_type /*is_manifold*/) const
+ {
+ OutgoingHalfEdgeAroundVertexCirculator circ =
+ this->getOutgoingHalfEdgeAroundVertexCirculator(idx_vertex);
+ const OutgoingHalfEdgeAroundVertexCirculator circ_end = circ;
- ////////////////////////////////////////////////////////////////////////
- // get / set Vertex
- ////////////////////////////////////////////////////////////////////////
+ if (!this->isBoundary((circ++).getTargetIndex()))
+ return (true);
+ do {
+ if (this->isBoundary(circ.getTargetIndex()))
+ return (false);
+ } while (++circ != circ_end);
- /** \brief Get the vertex for the given index. */
- inline Vertex&
- getVertex (const VertexIndex& idx_vertex)
- {
- assert (this->isValid (idx_vertex));
- return (vertices_ [idx_vertex.get ()]);
- }
+ return (true);
+ }
- /** \brief Get the vertex for the given index. */
- inline Vertex
- getVertex (const VertexIndex& idx_vertex) const
- {
- assert (this->isValid (idx_vertex));
- return (vertices_ [idx_vertex.get ()]);
- }
+ /** \brief Always manifold. */
+ inline bool isManifold(std::true_type /*is_manifold*/) const { return (true); }
- /** \brief Set the vertex at the given index. */
- inline void
- setVertex (const VertexIndex& idx_vertex, const Vertex& vertex)
- {
- assert (this->isValid (idx_vertex));
- vertices_ [idx_vertex.get ()] = vertex;
- }
+ /** \brief Check if all vertices in the mesh are manifold. */
+ bool isManifold(std::false_type /*is_manifold*/) const
+ {
+ for (std::size_t i = 0; i < this->sizeVertices(); ++i) {
+ if (!this->isManifold(VertexIndex(i)))
+ return (false);
+ }
+ return (true);
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // reserveData / resizeData / clearData
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Reserve storage space for the mesh data. */
+ template <class DataCloudT>
+ inline void
+ reserveData(DataCloudT& cloud, const std::size_t n, std::true_type /*has_data*/) const
+ {
+ cloud.reserve(n);
+ }
+
+  /** \brief Does nothing. */
+ template <class DataCloudT>
+ inline void
+ reserveData(DataCloudT& /*cloud*/,
+ const std::size_t /*n*/,
+ std::false_type /*has_data*/) const
+ {}
+
+ /** \brief Resize the mesh data. */
+ template <class DataCloudT>
+ inline void
+  resizeData(DataCloudT& data_cloud,
+             const std::size_t n,
+             const typename DataCloudT::value_type& data,
+             std::true_type /*has_data*/) const
+  {
+    data_cloud.resize(n, data);
+ }
+
+ /** \brief Does nothing. */
+ template <class DataCloudT>
+ inline void
+ resizeData(DataCloudT& /*data_cloud*/,
+ const std::size_t /*n*/,
+ const typename DataCloudT::value_type& /*data*/,
+ std::false_type /*has_data*/) const
+ {}
+
+ /** \brief Clear the mesh data. */
+ template <class DataCloudT>
+ inline void
+ clearData(DataCloudT& cloud, std::true_type /*has_data*/) const
+ {
+ cloud.clear();
+ }
+
+ /** \brief Does nothing. */
+ template <class DataCloudT>
+ inline void
+ clearData(DataCloudT& /*cloud*/, std::false_type /*has_data*/) const
+ {}
+
+ ////////////////////////////////////////////////////////////////////////
+ // get / set Vertex
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Get the vertex for the given index. */
+ inline Vertex&
+ getVertex(const VertexIndex& idx_vertex)
+ {
+ assert(this->isValid(idx_vertex));
+ return (vertices_[idx_vertex.get()]);
+ }
- ////////////////////////////////////////////////////////////////////////
- // get / set HalfEdge
- ////////////////////////////////////////////////////////////////////////
+ /** \brief Get the vertex for the given index. */
+ inline Vertex
+ getVertex(const VertexIndex& idx_vertex) const
+ {
+ assert(this->isValid(idx_vertex));
+ return (vertices_[idx_vertex.get()]);
+ }
- /** \brief Get the half-edge for the given index. */
- inline HalfEdge&
- getHalfEdge (const HalfEdgeIndex& idx_he)
- {
- assert (this->isValid (idx_he));
- return (half_edges_ [idx_he.get ()]);
- }
+ /** \brief Set the vertex at the given index. */
+ inline void
+ setVertex(const VertexIndex& idx_vertex, const Vertex& vertex)
+ {
+ assert(this->isValid(idx_vertex));
+ vertices_[idx_vertex.get()] = vertex;
+ }
- /** \brief Get the half-edge for the given index. */
- inline HalfEdge
- getHalfEdge (const HalfEdgeIndex& idx_he) const
- {
- assert (this->isValid (idx_he));
- return (half_edges_ [idx_he.get ()]);
- }
+ ////////////////////////////////////////////////////////////////////////
+ // get / set HalfEdge
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Set the half-edge at the given index. */
- inline void
- setHalfEdge (const HalfEdgeIndex& idx_he, const HalfEdge& half_edge)
- {
- assert (this->isValid (idx_he));
- half_edges_ [idx_he.get ()] = half_edge;
- }
+ /** \brief Get the half-edge for the given index. */
+ inline HalfEdge&
+ getHalfEdge(const HalfEdgeIndex& idx_he)
+ {
+ assert(this->isValid(idx_he));
+ return (half_edges_[idx_he.get()]);
+ }
- ////////////////////////////////////////////////////////////////////////
- // get / set Face
- ////////////////////////////////////////////////////////////////////////
+ /** \brief Get the half-edge for the given index. */
+ inline HalfEdge
+ getHalfEdge(const HalfEdgeIndex& idx_he) const
+ {
+ assert(this->isValid(idx_he));
+ return (half_edges_[idx_he.get()]);
+ }
- /** \brief Get the face for the given index. */
- inline Face&
- getFace (const FaceIndex& idx_face)
- {
- assert (this->isValid (idx_face));
- return (faces_ [idx_face.get ()]);
- }
+ /** \brief Set the half-edge at the given index. */
+ inline void
+ setHalfEdge(const HalfEdgeIndex& idx_he, const HalfEdge& half_edge)
+ {
+ assert(this->isValid(idx_he));
+ half_edges_[idx_he.get()] = half_edge;
+ }
- /** \brief Get the face for the given index. */
- inline Face
- getFace (const FaceIndex& idx_face) const
- {
- assert (this->isValid (idx_face));
- return (faces_ [idx_face.get ()]);
- }
+ ////////////////////////////////////////////////////////////////////////
+ // get / set Face
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Set the face at the given index. */
- inline void
- setFace (const FaceIndex& idx_face, const Face& face)
- {
- assert (this->isValid (idx_face));
- faces_ [idx_face.get ()] = face;
- }
+ /** \brief Get the face for the given index. */
+ inline Face&
+ getFace(const FaceIndex& idx_face)
+ {
+ assert(this->isValid(idx_face));
+ return (faces_[idx_face.get()]);
+ }
- private:
+ /** \brief Get the face for the given index. */
+ inline Face
+ getFace(const FaceIndex& idx_face) const
+ {
+ assert(this->isValid(idx_face));
+ return (faces_[idx_face.get()]);
+ }
- ////////////////////////////////////////////////////////////////////////
- // Members
- ////////////////////////////////////////////////////////////////////////
+ /** \brief Set the face at the given index. */
+ inline void
+ setFace(const FaceIndex& idx_face, const Face& face)
+ {
+ assert(this->isValid(idx_face));
+ faces_[idx_face.get()] = face;
+ }
- /** \brief Data stored for the vertices. */
- VertexDataCloud vertex_data_cloud_;
+private:
+ ////////////////////////////////////////////////////////////////////////
+ // Members
+ ////////////////////////////////////////////////////////////////////////
- /** \brief Data stored for the half-edges. */
- HalfEdgeDataCloud half_edge_data_cloud_;
+ /** \brief Data stored for the vertices. */
+ VertexDataCloud vertex_data_cloud_;
- /** \brief Data stored for the edges. */
- EdgeDataCloud edge_data_cloud_;
+ /** \brief Data stored for the half-edges. */
+ HalfEdgeDataCloud half_edge_data_cloud_;
- /** \brief Data stored for the faces. */
- FaceDataCloud face_data_cloud_;
+ /** \brief Data stored for the edges. */
+ EdgeDataCloud edge_data_cloud_;
- /** \brief Connectivity information for the vertices. */
- Vertices vertices_;
+ /** \brief Data stored for the faces. */
+ FaceDataCloud face_data_cloud_;
- /** \brief Connectivity information for the half-edges. */
- HalfEdges half_edges_;
+ /** \brief Connectivity information for the vertices. */
+ Vertices vertices_;
- /** \brief Connectivity information for the faces. */
- Faces faces_;
+ /** \brief Connectivity information for the half-edges. */
+ HalfEdges half_edges_;
- // NOTE: It is MUCH faster to store these variables permamently.
+ /** \brief Connectivity information for the faces. */
+ Faces faces_;
- /** \brief Storage for addFaceImplBase and deleteFace. */
- HalfEdgeIndices inner_he_;
+  // NOTE: It is MUCH faster to store these variables permanently.
- /** \brief Storage for addFaceImplBase. */
- HalfEdgeIndices free_he_;
+ /** \brief Storage for addFaceImplBase and deleteFace. */
+ HalfEdgeIndices inner_he_;
- /** \brief Storage for addFaceImplBase. */
- std::vector <bool> is_new_;
+ /** \brief Storage for addFaceImplBase. */
+ HalfEdgeIndices free_he_;
- /** \brief Storage for addFaceImplBase. */
- std::vector <bool> make_adjacent_;
+ /** \brief Storage for addFaceImplBase. */
+ std::vector<bool> is_new_;
- /** \brief Storage for deleteFace. */
- std::vector <bool> is_boundary_;
+ /** \brief Storage for addFaceImplBase. */
+ std::vector<bool> make_adjacent_;
- /** \brief Storage for deleteVertex. */
- FaceIndices delete_faces_vertex_;
+ /** \brief Storage for deleteFace. */
+ std::vector<bool> is_boundary_;
- /** \brief Storage for deleteFace. */
- FaceIndices delete_faces_face_;
+ /** \brief Storage for deleteVertex. */
+ FaceIndices delete_faces_vertex_;
- public:
+ /** \brief Storage for deleteFace. */
+ FaceIndices delete_faces_face_;
- template <class MeshT>
- friend class pcl::geometry::MeshIO;
+public:
+ template <class MeshT>
+ friend class pcl::geometry::MeshIO;
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
- } // End namespace geometry
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // End namespace geometry
} // End namespace pcl
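
A minimal, self-contained sketch (not part of this patch) of the std::true_type / std::false_type tag dispatch that MeshBase uses above for addData / clearData / reserveData / resizeData and the connect* overloads. "TinyCloud", "HasDataT" and "add" are illustrative names only, not PCL API; the point is that the empty-data overload compiles to a no-op.

#include <iostream>
#include <type_traits>
#include <vector>

template <class DataT, class HasDataT>
struct TinyCloud {
  // Selected when the mesh element carries user data.
  void
  add(const DataT& data, std::true_type /*has_data*/)
  {
    storage_.push_back(data);
  }

  // Selected when the element data type is empty (cf. pcl::geometry::NoData): a no-op.
  void
  add(const DataT& /*data*/, std::false_type /*has_data*/)
  {}

  void
  add(const DataT& data)
  {
    add(data, HasDataT()); // compile-time selection, no runtime branch
  }

  std::vector<DataT> storage_;
};

int main()
{
  TinyCloud<int, std::true_type> with_data;
  TinyCloud<int, std::false_type> without_data;
  with_data.add(42);    // stored
  without_data.add(42); // resolves to the empty overload
  std::cout << with_data.storage_.size() << " " << without_data.storage_.size() << "\n";
  return 0;
}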
*
*/
-// NOTE: This file has been created with 'pcl_src/geometry/include/pcl/geometry/mesh_circulators.py'
+// NOTE: This file has been created with
+// 'pcl_src/geometry/include/pcl/geometry/mesh_circulators.py'
#pragma once
// VertexAroundVertexCirculator
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Circulates counter-clockwise around a vertex and returns an index to the terminating vertex of the outgoing half-edge (the target). The best way to declare the circulator is to use the method pcl::geometry::MeshBase::getVertexAroundVertexCirculator ().
- * \tparam MeshT Mesh to which this circulator belongs to.
- * \note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT>
- class VertexAroundVertexCirculator
- : boost::equality_comparable <pcl::geometry::VertexAroundVertexCirculator <MeshT>
- , boost::unit_steppable <pcl::geometry::VertexAroundVertexCirculator <MeshT>
- > >
- {
- public:
-
- using Base = boost::equality_comparable <pcl::geometry::VertexAroundVertexCirculator <MeshT>,
- boost::unit_steppable <pcl::geometry::VertexAroundVertexCirculator <MeshT> > >;
- using Self = pcl::geometry::VertexAroundVertexCirculator<MeshT>;
-
- using Mesh = MeshT;
- using VertexIndex = typename Mesh::VertexIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
-
- /** \brief Constructor resulting in an invalid circulator. */
- VertexAroundVertexCirculator ()
- : mesh_ (nullptr),
- idx_outgoing_half_edge_ ()
- {
- }
-
- /** \brief Construct from the vertex around which we want to circulate. */
- VertexAroundVertexCirculator (const VertexIndex& idx_vertex,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_outgoing_half_edge_ (mesh->getOutgoingHalfEdgeIndex (idx_vertex))
- {
- }
-
- /** \brief Construct directly from the outgoing half-edge. */
- VertexAroundVertexCirculator (const HalfEdgeIndex& idx_outgoing_half_edge,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_outgoing_half_edge_ (idx_outgoing_half_edge)
- {
- }
-
- /** \brief Check if the circulator is valid.
- * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */
- inline bool
- isValid () const
- {
- return (idx_outgoing_half_edge_.isValid ());
- }
-
- /** \brief Comparison operators (with boost::operators): == !=
- * \warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */
- inline bool
- operator == (const Self& other) const
- {
- return (idx_outgoing_half_edge_ == other.idx_outgoing_half_edge_);
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- idx_outgoing_half_edge_ = mesh_->getNextHalfEdgeIndex (mesh_->getOppositeHalfEdgeIndex (idx_outgoing_half_edge_));
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): -- (pre and post) */
- inline Self&
- operator -- ()
- {
- idx_outgoing_half_edge_ = mesh_->getOppositeHalfEdgeIndex (mesh_->getPrevHalfEdgeIndex (idx_outgoing_half_edge_));
- return (*this);
- }
-
- /** \brief Get the index to the target vertex. */
- inline VertexIndex
- getTargetIndex () const
- {
- return (mesh_->getTerminatingVertexIndex (idx_outgoing_half_edge_));
- }
-
- /** \brief Get the half-edge that is currently stored in the circulator. */
- inline HalfEdgeIndex
- getCurrentHalfEdgeIndex () const
- {
- return (idx_outgoing_half_edge_);
- }
-
- /** \brief The mesh to which this circulator belongs to. */
- Mesh* mesh_;
-
- /** \brief The outgoing half-edge of the vertex around which we want to circulate. */
- HalfEdgeIndex idx_outgoing_half_edge_;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Circulates counter-clockwise around a vertex and returns an index to the
+ * terminating vertex of the outgoing half-edge (the target). The best way to declare
+ * the circulator is to use the method
+ * pcl::geometry::MeshBase::getVertexAroundVertexCirculator ().
+ * \tparam MeshT Mesh to which this circulator belongs.
+ * \note The circulator can't be used to change the connectivity in the mesh (only
+ * const circulators are valid).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+class VertexAroundVertexCirculator
+: boost::equality_comparable<
+ pcl::geometry::VertexAroundVertexCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::VertexAroundVertexCirculator<MeshT>>> {
+public:
+ using Base = boost::equality_comparable<
+ pcl::geometry::VertexAroundVertexCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::VertexAroundVertexCirculator<MeshT>>>;
+ using Self = pcl::geometry::VertexAroundVertexCirculator<MeshT>;
+
+ using Mesh = MeshT;
+ using VertexIndex = typename Mesh::VertexIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+
+ /** \brief Constructor resulting in an invalid circulator. */
+ VertexAroundVertexCirculator() : mesh_(nullptr), idx_outgoing_half_edge_() {}
+
+ /** \brief Construct from the vertex around which we want to circulate. */
+ VertexAroundVertexCirculator(const VertexIndex& idx_vertex, Mesh* const mesh)
+ : mesh_(mesh), idx_outgoing_half_edge_(mesh->getOutgoingHalfEdgeIndex(idx_vertex))
+ {}
+
+ /** \brief Construct directly from the outgoing half-edge. */
+ VertexAroundVertexCirculator(const HalfEdgeIndex& idx_outgoing_half_edge,
+ Mesh* const mesh)
+ : mesh_(mesh), idx_outgoing_half_edge_(idx_outgoing_half_edge)
+ {}
+
+ /** \brief Check if the circulator is valid.
+ * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure
+ * this yourself when constructing the circulator. */
+ inline bool
+ isValid() const
+ {
+ return (idx_outgoing_half_edge_.isValid());
+ }
+
+ /** \brief Comparison operators (with boost::operators): == !=
+ * \warning Does NOT check if the circulators belong to the same mesh. Please check
+ * this yourself. */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (idx_outgoing_half_edge_ == other.idx_outgoing_half_edge_);
+ }
+
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ idx_outgoing_half_edge_ = mesh_->getNextHalfEdgeIndex(
+ mesh_->getOppositeHalfEdgeIndex(idx_outgoing_half_edge_));
+ return (*this);
+ }
+
+ /** \brief Decrement operators (with boost::operators): -- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ idx_outgoing_half_edge_ = mesh_->getOppositeHalfEdgeIndex(
+ mesh_->getPrevHalfEdgeIndex(idx_outgoing_half_edge_));
+ return (*this);
+ }
+
+ /** \brief Get the index to the target vertex. */
+ inline VertexIndex
+ getTargetIndex() const
+ {
+ return (mesh_->getTerminatingVertexIndex(idx_outgoing_half_edge_));
+ }
+
+ /** \brief Get the half-edge that is currently stored in the circulator. */
+ inline HalfEdgeIndex
+ getCurrentHalfEdgeIndex() const
+ {
+ return (idx_outgoing_half_edge_);
+ }
+
+  /** \brief The mesh to which this circulator belongs. */
+ Mesh* mesh_;
+
+ /** \brief The outgoing half-edge of the vertex around which we want to circulate. */
+ HalfEdgeIndex idx_outgoing_half_edge_;
+};
+} // End namespace geometry
} // End namespace pcl
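
A hedged usage sketch (not part of this patch), assuming the usual pcl::geometry::TriangleMesh and DefaultMeshTraits headers: it circulates over the one-ring neighbourhood of a vertex with the same do/while + circ_end idiom that MeshBase itself uses internally.

#include <pcl/geometry/mesh_traits.h>
#include <pcl/geometry/triangle_mesh.h>

#include <iostream>

using MeshTraits = pcl::geometry::DefaultMeshTraits<>; // no per-element data
using Mesh = pcl::geometry::TriangleMesh<MeshTraits>;

int main()
{
  Mesh mesh;
  const Mesh::VertexIndex v0 = mesh.addVertex();
  const Mesh::VertexIndex v1 = mesh.addVertex();
  const Mesh::VertexIndex v2 = mesh.addVertex();
  mesh.addFace(v0, v1, v2); // a single triangle

  Mesh::VertexAroundVertexCirculator circ = mesh.getVertexAroundVertexCirculator(v0);
  if (!circ.isValid())
    return 0; // v0 would be isolated (no outgoing half-edge)
  const Mesh::VertexAroundVertexCirculator circ_end = circ;
  do {
    std::cout << "neighbour of v0: " << circ.getTargetIndex().get() << "\n";
  } while (++circ != circ_end);
  return 0;
}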
////////////////////////////////////////////////////////////////////////////////
// OutgoingHalfEdgeAroundVertexCirculator
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Circulates counter-clockwise around a vertex and returns an index to the outgoing half-edge (the target). The best way to declare the circulator is to use the method pcl::geometry::MeshBase::getOutgoingHalfEdgeAroundVertexCirculator ().
- * \tparam MeshT Mesh to which this circulator belongs to.
- * \note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT>
- class OutgoingHalfEdgeAroundVertexCirculator
- : boost::equality_comparable <pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator <MeshT>
- , boost::unit_steppable <pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator <MeshT>
- > >
- {
- public:
-
- using Base = boost::equality_comparable <pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator <MeshT>,
- boost::unit_steppable <pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator <MeshT> > >;
- using Self = pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator<MeshT>;
-
- using Mesh = MeshT;
- using VertexIndex = typename Mesh::VertexIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
-
- /** \brief Constructor resulting in an invalid circulator. */
- OutgoingHalfEdgeAroundVertexCirculator ()
- : mesh_ (nullptr),
- idx_outgoing_half_edge_ ()
- {
- }
-
- /** \brief Construct from the vertex around which we want to circulate. */
- OutgoingHalfEdgeAroundVertexCirculator (const VertexIndex& idx_vertex,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_outgoing_half_edge_ (mesh->getOutgoingHalfEdgeIndex (idx_vertex))
- {
- }
-
- /** \brief Construct directly from the outgoing half-edge. */
- OutgoingHalfEdgeAroundVertexCirculator (const HalfEdgeIndex& idx_outgoing_half_edge,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_outgoing_half_edge_ (idx_outgoing_half_edge)
- {
- }
-
- /** \brief Check if the circulator is valid.
- * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */
- inline bool
- isValid () const
- {
- return (idx_outgoing_half_edge_.isValid ());
- }
-
- /** \brief Comparison operators (with boost::operators): == !=
- * \warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */
- inline bool
- operator == (const Self& other) const
- {
- return (idx_outgoing_half_edge_ == other.idx_outgoing_half_edge_);
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- idx_outgoing_half_edge_ = mesh_->getNextHalfEdgeIndex (mesh_->getOppositeHalfEdgeIndex (idx_outgoing_half_edge_));
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): -- (pre and post) */
- inline Self&
- operator -- ()
- {
- idx_outgoing_half_edge_ = mesh_->getOppositeHalfEdgeIndex (mesh_->getPrevHalfEdgeIndex (idx_outgoing_half_edge_));
- return (*this);
- }
-
- /** \brief Get the index to the outgoing half-edge. */
- inline HalfEdgeIndex
- getTargetIndex () const
- {
- return (idx_outgoing_half_edge_);
- }
-
- /** \brief Get the half-edge that is currently stored in the circulator. */
- inline HalfEdgeIndex
- getCurrentHalfEdgeIndex () const
- {
- return (idx_outgoing_half_edge_);
- }
-
- /** \brief The mesh to which this circulator belongs to. */
- Mesh* mesh_;
-
- /** \brief The outgoing half-edge of the vertex around which we want to circulate. */
- HalfEdgeIndex idx_outgoing_half_edge_;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Circulates counter-clockwise around a vertex and returns an index to the
+ * outgoing half-edge (the target). The best way to declare the circulator is to use the
+ * method pcl::geometry::MeshBase::getOutgoingHalfEdgeAroundVertexCirculator ().
+ * \tparam MeshT Mesh to which this circulator belongs.
+ * \note The circulator can't be used to
+ * change the connectivity in the mesh (only const circulators are valid).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+class OutgoingHalfEdgeAroundVertexCirculator
+: boost::equality_comparable<
+ pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator<MeshT>,
+ boost::unit_steppable<
+ pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator<MeshT>>> {
+public:
+ using Base = boost::equality_comparable<
+ pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator<MeshT>,
+ boost::unit_steppable<
+ pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator<MeshT>>>;
+ using Self = pcl::geometry::OutgoingHalfEdgeAroundVertexCirculator<MeshT>;
+
+ using Mesh = MeshT;
+ using VertexIndex = typename Mesh::VertexIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+
+ /** \brief Constructor resulting in an invalid circulator. */
+ OutgoingHalfEdgeAroundVertexCirculator() : mesh_(nullptr), idx_outgoing_half_edge_()
+ {}
+
+ /** \brief Construct from the vertex around which we want to circulate. */
+ OutgoingHalfEdgeAroundVertexCirculator(const VertexIndex& idx_vertex,
+ Mesh* const mesh)
+ : mesh_(mesh), idx_outgoing_half_edge_(mesh->getOutgoingHalfEdgeIndex(idx_vertex))
+ {}
+
+ /** \brief Construct directly from the outgoing half-edge. */
+ OutgoingHalfEdgeAroundVertexCirculator(const HalfEdgeIndex& idx_outgoing_half_edge,
+ Mesh* const mesh)
+ : mesh_(mesh), idx_outgoing_half_edge_(idx_outgoing_half_edge)
+ {}
+
+ /** \brief Check if the circulator is valid.
+ * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure
+ * this yourself when constructing the circulator. */
+ inline bool
+ isValid() const
+ {
+ return (idx_outgoing_half_edge_.isValid());
+ }
+
+ /** \brief Comparison operators (with boost::operators): == !=
+ * \warning Does NOT check if the circulators belong to the same mesh. Please check
+ * this yourself. */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (idx_outgoing_half_edge_ == other.idx_outgoing_half_edge_);
+ }
+
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ idx_outgoing_half_edge_ = mesh_->getNextHalfEdgeIndex(
+ mesh_->getOppositeHalfEdgeIndex(idx_outgoing_half_edge_));
+ return (*this);
+ }
+
+ /** \brief Decrement operators (with boost::operators): -- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ idx_outgoing_half_edge_ = mesh_->getOppositeHalfEdgeIndex(
+ mesh_->getPrevHalfEdgeIndex(idx_outgoing_half_edge_));
+ return (*this);
+ }
+
+ /** \brief Get the index to the outgoing half-edge. */
+ inline HalfEdgeIndex
+ getTargetIndex() const
+ {
+ return (idx_outgoing_half_edge_);
+ }
+
+ /** \brief Get the half-edge that is currently stored in the circulator. */
+ inline HalfEdgeIndex
+ getCurrentHalfEdgeIndex() const
+ {
+ return (idx_outgoing_half_edge_);
+ }
+
+  /** \brief The mesh to which this circulator belongs. */
+ Mesh* mesh_;
+
+ /** \brief The outgoing half-edge of the vertex around which we want to circulate. */
+ HalfEdgeIndex idx_outgoing_half_edge_;
+};
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// IncomingHalfEdgeAroundVertexCirculator
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Circulates counter-clockwise around a vertex and returns an index to the incoming half-edge (the target). The best way to declare the circulator is to use the method pcl::geometry::MeshBase::getIncomingHalfEdgeAroundVertexCirculator ().
- * \tparam MeshT Mesh to which this circulator belongs to.
- * \note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT>
- class IncomingHalfEdgeAroundVertexCirculator
- : boost::equality_comparable <pcl::geometry::IncomingHalfEdgeAroundVertexCirculator <MeshT>
- , boost::unit_steppable <pcl::geometry::IncomingHalfEdgeAroundVertexCirculator <MeshT>
- > >
- {
- public:
-
- using Base = boost::equality_comparable <pcl::geometry::IncomingHalfEdgeAroundVertexCirculator <MeshT>,
- boost::unit_steppable <pcl::geometry::IncomingHalfEdgeAroundVertexCirculator <MeshT> > >;
- using Self = pcl::geometry::IncomingHalfEdgeAroundVertexCirculator<MeshT>;
-
- using Mesh = MeshT;
- using VertexIndex = typename Mesh::VertexIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
-
- /** \brief Constructor resulting in an invalid circulator. */
- IncomingHalfEdgeAroundVertexCirculator ()
- : mesh_ (nullptr),
- idx_incoming_half_edge_ ()
- {
- }
-
- /** \brief Construct from the vertex around which we want to circulate. */
- IncomingHalfEdgeAroundVertexCirculator (const VertexIndex& idx_vertex,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_incoming_half_edge_ (mesh->getIncomingHalfEdgeIndex (idx_vertex))
- {
- }
-
- /** \brief Construct directly from the incoming half-edge. */
- IncomingHalfEdgeAroundVertexCirculator (const HalfEdgeIndex& idx_incoming_half_edge,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_incoming_half_edge_ (idx_incoming_half_edge)
- {
- }
-
- /** \brief Check if the circulator is valid.
- * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */
- inline bool
- isValid () const
- {
- return (idx_incoming_half_edge_.isValid ());
- }
-
- /** \brief Comparison operators (with boost::operators): == !=
- * \warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */
- inline bool
- operator == (const Self& other) const
- {
- return (idx_incoming_half_edge_ == other.idx_incoming_half_edge_);
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- idx_incoming_half_edge_ = mesh_->getOppositeHalfEdgeIndex (mesh_->getNextHalfEdgeIndex (idx_incoming_half_edge_));
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): -- (pre and post) */
- inline Self&
- operator -- ()
- {
- idx_incoming_half_edge_ = mesh_->getPrevHalfEdgeIndex (mesh_->getOppositeHalfEdgeIndex (idx_incoming_half_edge_));
- return (*this);
- }
-
- /** \brief Get the index to the incoming half-edge. */
- inline HalfEdgeIndex
- getTargetIndex () const
- {
- return (idx_incoming_half_edge_);
- }
-
- /** \brief Get the half-edge that is currently stored in the circulator. */
- inline HalfEdgeIndex
- getCurrentHalfEdgeIndex () const
- {
- return (idx_incoming_half_edge_);
- }
-
- /** \brief The mesh to which this circulator belongs to. */
- Mesh* mesh_;
-
- /** \brief The incoming half-edge of the vertex around which we want to circulate. */
- HalfEdgeIndex idx_incoming_half_edge_;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Circulates counter-clockwise around a vertex and returns an index to the
+ * incoming half-edge (the target). The best way to declare the circulator is to use the
+ * method pcl::geometry::MeshBase::getIncomingHalfEdgeAroundVertexCirculator ().
+ * \tparam MeshT Mesh to which this circulator belongs.
+ * \note The circulator can't be used to change the connectivity in the mesh (only
+ * const circulators are valid).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+class IncomingHalfEdgeAroundVertexCirculator
+: boost::equality_comparable<
+ pcl::geometry::IncomingHalfEdgeAroundVertexCirculator<MeshT>,
+ boost::unit_steppable<
+ pcl::geometry::IncomingHalfEdgeAroundVertexCirculator<MeshT>>> {
+public:
+ using Base = boost::equality_comparable<
+ pcl::geometry::IncomingHalfEdgeAroundVertexCirculator<MeshT>,
+ boost::unit_steppable<
+ pcl::geometry::IncomingHalfEdgeAroundVertexCirculator<MeshT>>>;
+ using Self = pcl::geometry::IncomingHalfEdgeAroundVertexCirculator<MeshT>;
+
+ using Mesh = MeshT;
+ using VertexIndex = typename Mesh::VertexIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+
+ /** \brief Constructor resulting in an invalid circulator. */
+ IncomingHalfEdgeAroundVertexCirculator() : mesh_(nullptr), idx_incoming_half_edge_()
+ {}
+
+ /** \brief Construct from the vertex around which we want to circulate. */
+ IncomingHalfEdgeAroundVertexCirculator(const VertexIndex& idx_vertex,
+ Mesh* const mesh)
+ : mesh_(mesh), idx_incoming_half_edge_(mesh->getIncomingHalfEdgeIndex(idx_vertex))
+ {}
+
+ /** \brief Construct directly from the incoming half-edge. */
+ IncomingHalfEdgeAroundVertexCirculator(const HalfEdgeIndex& idx_incoming_half_edge,
+ Mesh* const mesh)
+ : mesh_(mesh), idx_incoming_half_edge_(idx_incoming_half_edge)
+ {}
+
+ /** \brief Check if the circulator is valid.
+ * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure
+ * this yourself when constructing the circulator. */
+ inline bool
+ isValid() const
+ {
+ return (idx_incoming_half_edge_.isValid());
+ }
+
+ /** \brief Comparison operators (with boost::operators): == !=
+ * \warning Does NOT check if the circulators belong to the same mesh. Please check
+ * this yourself. */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (idx_incoming_half_edge_ == other.idx_incoming_half_edge_);
+ }
+
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ idx_incoming_half_edge_ = mesh_->getOppositeHalfEdgeIndex(
+ mesh_->getNextHalfEdgeIndex(idx_incoming_half_edge_));
+ return (*this);
+ }
+
+ /** \brief Decrement operators (with boost::operators): -- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ idx_incoming_half_edge_ = mesh_->getPrevHalfEdgeIndex(
+ mesh_->getOppositeHalfEdgeIndex(idx_incoming_half_edge_));
+ return (*this);
+ }
+
+ /** \brief Get the index to the incoming half-edge. */
+ inline HalfEdgeIndex
+ getTargetIndex() const
+ {
+ return (idx_incoming_half_edge_);
+ }
+
+ /** \brief Get the half-edge that is currently stored in the circulator. */
+ inline HalfEdgeIndex
+ getCurrentHalfEdgeIndex() const
+ {
+ return (idx_incoming_half_edge_);
+ }
+
+  /** \brief The mesh to which this circulator belongs. */
+ Mesh* mesh_;
+
+ /** \brief The incoming half-edge of the vertex around which we want to circulate. */
+ HalfEdgeIndex idx_incoming_half_edge_;
+};
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// FaceAroundVertexCirculator
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Circulates counter-clockwise around a vertex and returns an index to the face of the outgoing half-edge (the target). The best way to declare the circulator is to use the method pcl::geometry::MeshBase::getFaceAroundVertexCirculator ().
- * \tparam MeshT Mesh to which this circulator belongs to.
- * \note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT>
- class FaceAroundVertexCirculator
- : boost::equality_comparable <pcl::geometry::FaceAroundVertexCirculator <MeshT>
- , boost::unit_steppable <pcl::geometry::FaceAroundVertexCirculator <MeshT>
- > >
- {
- public:
-
- using Base = boost::equality_comparable <pcl::geometry::FaceAroundVertexCirculator <MeshT>,
- boost::unit_steppable <pcl::geometry::FaceAroundVertexCirculator <MeshT> > >;
- using Self = pcl::geometry::FaceAroundVertexCirculator<MeshT>;
-
- using Mesh = MeshT;
- using FaceIndex = typename Mesh::FaceIndex;
- using VertexIndex = typename Mesh::VertexIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
-
- /** \brief Constructor resulting in an invalid circulator. */
- FaceAroundVertexCirculator ()
- : mesh_ (nullptr),
- idx_outgoing_half_edge_ ()
- {
- }
-
- /** \brief Construct from the vertex around which we want to circulate. */
- FaceAroundVertexCirculator (const VertexIndex& idx_vertex,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_outgoing_half_edge_ (mesh->getOutgoingHalfEdgeIndex (idx_vertex))
- {
- }
-
- /** \brief Construct directly from the outgoing half-edge. */
- FaceAroundVertexCirculator (const HalfEdgeIndex& idx_outgoing_half_edge,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_outgoing_half_edge_ (idx_outgoing_half_edge)
- {
- }
-
- /** \brief Check if the circulator is valid.
- * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */
- inline bool
- isValid () const
- {
- return (idx_outgoing_half_edge_.isValid ());
- }
-
- /** \brief Comparison operators (with boost::operators): == !=
- * \warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */
- inline bool
- operator == (const Self& other) const
- {
- return (idx_outgoing_half_edge_ == other.idx_outgoing_half_edge_);
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- idx_outgoing_half_edge_ = mesh_->getNextHalfEdgeIndex (mesh_->getOppositeHalfEdgeIndex (idx_outgoing_half_edge_));
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): -- (pre and post) */
- inline Self&
- operator -- ()
- {
- idx_outgoing_half_edge_ = mesh_->getOppositeHalfEdgeIndex (mesh_->getPrevHalfEdgeIndex (idx_outgoing_half_edge_));
- return (*this);
- }
-
- /** \brief Get the index to the target face. */
- inline FaceIndex
- getTargetIndex () const
- {
- return (mesh_->getFaceIndex (idx_outgoing_half_edge_));
- }
-
- /** \brief Get the half-edge that is currently stored in the circulator. */
- inline HalfEdgeIndex
- getCurrentHalfEdgeIndex () const
- {
- return (idx_outgoing_half_edge_);
- }
-
- /** \brief The mesh to which this circulator belongs to. */
- Mesh* mesh_;
-
- /** \brief The outgoing half-edge of the vertex around which we want to circulate. */
- HalfEdgeIndex idx_outgoing_half_edge_;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Circulates counter-clockwise around a vertex and returns an index to the face
+ * of the outgoing half-edge (the target). The best way to declare the circulator is to
+ * use the method pcl::geometry::MeshBase::getFaceAroundVertexCirculator ().
+ * \tparam MeshT Mesh to which this circulator belongs.
+ * \note The circulator can't be used to change the connectivity in the mesh (only
+ * const circulators are valid).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+class FaceAroundVertexCirculator
+: boost::equality_comparable<
+ pcl::geometry::FaceAroundVertexCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::FaceAroundVertexCirculator<MeshT>>> {
+public:
+ using Base = boost::equality_comparable<
+ pcl::geometry::FaceAroundVertexCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::FaceAroundVertexCirculator<MeshT>>>;
+ using Self = pcl::geometry::FaceAroundVertexCirculator<MeshT>;
+
+ using Mesh = MeshT;
+ using FaceIndex = typename Mesh::FaceIndex;
+ using VertexIndex = typename Mesh::VertexIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+
+ /** \brief Constructor resulting in an invalid circulator. */
+ FaceAroundVertexCirculator() : mesh_(nullptr), idx_outgoing_half_edge_() {}
+
+ /** \brief Construct from the vertex around which we want to circulate. */
+ FaceAroundVertexCirculator(const VertexIndex& idx_vertex, Mesh* const mesh)
+ : mesh_(mesh), idx_outgoing_half_edge_(mesh->getOutgoingHalfEdgeIndex(idx_vertex))
+ {}
+
+ /** \brief Construct directly from the outgoing half-edge. */
+ FaceAroundVertexCirculator(const HalfEdgeIndex& idx_outgoing_half_edge,
+ Mesh* const mesh)
+ : mesh_(mesh), idx_outgoing_half_edge_(idx_outgoing_half_edge)
+ {}
+
+ /** \brief Check if the circulator is valid.
+ * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure
+ * this yourself when constructing the circulator. */
+ inline bool
+ isValid() const
+ {
+ return (idx_outgoing_half_edge_.isValid());
+ }
+
+ /** \brief Comparison operators (with boost::operators): == !=
+ * \warning Does NOT check if the circulators belong to the same mesh. Please check
+ * this yourself. */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (idx_outgoing_half_edge_ == other.idx_outgoing_half_edge_);
+ }
+
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ idx_outgoing_half_edge_ = mesh_->getNextHalfEdgeIndex(
+ mesh_->getOppositeHalfEdgeIndex(idx_outgoing_half_edge_));
+ return (*this);
+ }
+
+ /** \brief Decrement operators (with boost::operators): -- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ idx_outgoing_half_edge_ = mesh_->getOppositeHalfEdgeIndex(
+ mesh_->getPrevHalfEdgeIndex(idx_outgoing_half_edge_));
+ return (*this);
+ }
+
+ /** \brief Get the index to the target face. */
+ inline FaceIndex
+ getTargetIndex() const
+ {
+ return (mesh_->getFaceIndex(idx_outgoing_half_edge_));
+ }
+
+ /** \brief Get the half-edge that is currently stored in the circulator. */
+ inline HalfEdgeIndex
+ getCurrentHalfEdgeIndex() const
+ {
+ return (idx_outgoing_half_edge_);
+ }
+
+  /** \brief The mesh to which this circulator belongs. */
+ Mesh* mesh_;
+
+ /** \brief The outgoing half-edge of the vertex around which we want to circulate. */
+ HalfEdgeIndex idx_outgoing_half_edge_;
+};
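+// Usage sketch, assuming `mesh` is a half-edge mesh derived from
+// pcl::geometry::MeshBase and `idx_vertex` is a valid VertexIndex: every circulator in
+// this file follows the same do/while protocol, shown here for the faces incident to a
+// vertex (visited counter-clockwise).
+//
+//   auto       circ     = mesh.getFaceAroundVertexCirculator (idx_vertex);
+//   const auto circ_end = circ;
+//   do {
+//     const auto idx_face = circ.getTargetIndex ();
+//     // idx_face may be invalid while the outgoing half-edge lies on the boundary.
+//   } while (++circ != circ_end);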
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// VertexAroundFaceCirculator
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Circulates clockwise around a face and returns an index to the terminating vertex of the inner half-edge (the target). The best way to declare the circulator is to use the method pcl::geometry::MeshBase::getVertexAroundFaceCirculator ().
- * \tparam MeshT Mesh to which this circulator belongs to.
- * \note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT>
- class VertexAroundFaceCirculator
- : boost::equality_comparable <pcl::geometry::VertexAroundFaceCirculator <MeshT>
- , boost::unit_steppable <pcl::geometry::VertexAroundFaceCirculator <MeshT>
- > >
- {
- public:
-
- using Base = boost::equality_comparable <pcl::geometry::VertexAroundFaceCirculator <MeshT>,
- boost::unit_steppable <pcl::geometry::VertexAroundFaceCirculator <MeshT> > >;
- using Self = pcl::geometry::VertexAroundFaceCirculator<MeshT>;
-
- using Mesh = MeshT;
- using VertexIndex = typename Mesh::VertexIndex;
- using FaceIndex = typename Mesh::FaceIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
-
- /** \brief Constructor resulting in an invalid circulator. */
- VertexAroundFaceCirculator ()
- : mesh_ (nullptr),
- idx_inner_half_edge_ ()
- {
- }
-
- /** \brief Construct from the face around which we want to circulate. */
- VertexAroundFaceCirculator (const FaceIndex& idx_face,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_inner_half_edge_ (mesh->getInnerHalfEdgeIndex (idx_face))
- {
- }
-
- /** \brief Construct directly from the inner half-edge. */
- VertexAroundFaceCirculator (const HalfEdgeIndex& idx_inner_half_edge,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_inner_half_edge_ (idx_inner_half_edge)
- {
- }
-
- /** \brief Check if the circulator is valid.
- * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */
- inline bool
- isValid () const
- {
- return (idx_inner_half_edge_.isValid ());
- }
-
- /** \brief Comparison operators (with boost::operators): == !=
- * \warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */
- inline bool
- operator == (const Self& other) const
- {
- return (idx_inner_half_edge_ == other.idx_inner_half_edge_);
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- idx_inner_half_edge_ = mesh_->getNextHalfEdgeIndex (idx_inner_half_edge_);
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): -- (pre and post) */
- inline Self&
- operator -- ()
- {
- idx_inner_half_edge_ = mesh_->getPrevHalfEdgeIndex (idx_inner_half_edge_);
- return (*this);
- }
-
- /** \brief Get the index to the target vertex. */
- inline VertexIndex
- getTargetIndex () const
- {
- return (mesh_->getTerminatingVertexIndex (idx_inner_half_edge_));
- }
-
- /** \brief Get the half-edge that is currently stored in the circulator. */
- inline HalfEdgeIndex
- getCurrentHalfEdgeIndex () const
- {
- return (idx_inner_half_edge_);
- }
-
- /** \brief The mesh to which this circulator belongs to. */
- Mesh* mesh_;
-
- /** \brief The inner half-edge of the face around which we want to circulate. */
- HalfEdgeIndex idx_inner_half_edge_;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Circulates clockwise around a face and returns an index to the terminating
+ * vertex of the inner half-edge (the target). The best way to declare the circulator is
+ * to use the method pcl::geometry::MeshBase::getVertexAroundFaceCirculator ().
+ * \tparam MeshT Mesh to which this circulator belongs.
+ * \note The circulator can't be used to change the connectivity in the mesh (only
+ * const circulators are valid).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+class VertexAroundFaceCirculator
+: boost::equality_comparable<
+ pcl::geometry::VertexAroundFaceCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::VertexAroundFaceCirculator<MeshT>>> {
+public:
+ using Base = boost::equality_comparable<
+ pcl::geometry::VertexAroundFaceCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::VertexAroundFaceCirculator<MeshT>>>;
+ using Self = pcl::geometry::VertexAroundFaceCirculator<MeshT>;
+
+ using Mesh = MeshT;
+ using VertexIndex = typename Mesh::VertexIndex;
+ using FaceIndex = typename Mesh::FaceIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+
+ /** \brief Constructor resulting in an invalid circulator. */
+ VertexAroundFaceCirculator() : mesh_(nullptr), idx_inner_half_edge_() {}
+
+ /** \brief Construct from the face around which we want to circulate. */
+ VertexAroundFaceCirculator(const FaceIndex& idx_face, Mesh* const mesh)
+ : mesh_(mesh), idx_inner_half_edge_(mesh->getInnerHalfEdgeIndex(idx_face))
+ {}
+
+ /** \brief Construct directly from the inner half-edge. */
+ VertexAroundFaceCirculator(const HalfEdgeIndex& idx_inner_half_edge, Mesh* const mesh)
+ : mesh_(mesh), idx_inner_half_edge_(idx_inner_half_edge)
+ {}
+
+ /** \brief Check if the circulator is valid.
+ * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure
+ * this yourself when constructing the circulator. */
+ inline bool
+ isValid() const
+ {
+ return (idx_inner_half_edge_.isValid());
+ }
+
+ /** \brief Comparison operators (with boost::operators): == !=
+ * \warning Does NOT check if the circulators belong to the same mesh. Please check
+ * this yourself. */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (idx_inner_half_edge_ == other.idx_inner_half_edge_);
+ }
+
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ idx_inner_half_edge_ = mesh_->getNextHalfEdgeIndex(idx_inner_half_edge_);
+ return (*this);
+ }
+
+ /** \brief Decrement operators (with boost::operators): -- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ idx_inner_half_edge_ = mesh_->getPrevHalfEdgeIndex(idx_inner_half_edge_);
+ return (*this);
+ }
+
+ /** \brief Get the index to the target vertex. */
+ inline VertexIndex
+ getTargetIndex() const
+ {
+ return (mesh_->getTerminatingVertexIndex(idx_inner_half_edge_));
+ }
+
+ /** \brief Get the half-edge that is currently stored in the circulator. */
+ inline HalfEdgeIndex
+ getCurrentHalfEdgeIndex() const
+ {
+ return (idx_inner_half_edge_);
+ }
+
+  /** \brief The mesh to which this circulator belongs. */
+ Mesh* mesh_;
+
+ /** \brief The inner half-edge of the face around which we want to circulate. */
+ HalfEdgeIndex idx_inner_half_edge_;
+};
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// InnerHalfEdgeAroundFaceCirculator
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Circulates clockwise around a face and returns an index to the inner half-edge (the target). The best way to declare the circulator is to use the method pcl::geometry::MeshBase::getInnerHalfEdgeAroundFaceCirculator ().
- * \tparam MeshT Mesh to which this circulator belongs to.
- * \note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT>
- class InnerHalfEdgeAroundFaceCirculator
- : boost::equality_comparable <pcl::geometry::InnerHalfEdgeAroundFaceCirculator <MeshT>
- , boost::unit_steppable <pcl::geometry::InnerHalfEdgeAroundFaceCirculator <MeshT>
- > >
- {
- public:
-
- using Base = boost::equality_comparable <pcl::geometry::InnerHalfEdgeAroundFaceCirculator <MeshT>,
- boost::unit_steppable <pcl::geometry::InnerHalfEdgeAroundFaceCirculator <MeshT> > >;
- using Self = pcl::geometry::InnerHalfEdgeAroundFaceCirculator<MeshT>;
-
- using Mesh = MeshT;
- using FaceIndex = typename Mesh::FaceIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
-
- /** \brief Constructor resulting in an invalid circulator. */
- InnerHalfEdgeAroundFaceCirculator ()
- : mesh_ (nullptr),
- idx_inner_half_edge_ ()
- {
- }
-
- /** \brief Construct from the face around which we want to circulate. */
- InnerHalfEdgeAroundFaceCirculator (const FaceIndex& idx_face,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_inner_half_edge_ (mesh->getInnerHalfEdgeIndex (idx_face))
- {
- }
-
- /** \brief Construct directly from the inner half-edge. */
- InnerHalfEdgeAroundFaceCirculator (const HalfEdgeIndex& idx_inner_half_edge,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_inner_half_edge_ (idx_inner_half_edge)
- {
- }
-
- /** \brief Check if the circulator is valid.
- * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */
- inline bool
- isValid () const
- {
- return (idx_inner_half_edge_.isValid ());
- }
-
- /** \brief Comparison operators (with boost::operators): == !=
- * \warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */
- inline bool
- operator == (const Self& other) const
- {
- return (idx_inner_half_edge_ == other.idx_inner_half_edge_);
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- idx_inner_half_edge_ = mesh_->getNextHalfEdgeIndex (idx_inner_half_edge_);
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): -- (pre and post) */
- inline Self&
- operator -- ()
- {
- idx_inner_half_edge_ = mesh_->getPrevHalfEdgeIndex (idx_inner_half_edge_);
- return (*this);
- }
-
- /** \brief Get the index to the inner half-edge. */
- inline HalfEdgeIndex
- getTargetIndex () const
- {
- return (idx_inner_half_edge_);
- }
-
- /** \brief Get the half-edge that is currently stored in the circulator. */
- inline HalfEdgeIndex
- getCurrentHalfEdgeIndex () const
- {
- return (idx_inner_half_edge_);
- }
-
- /** \brief The mesh to which this circulator belongs to. */
- Mesh* mesh_;
-
- /** \brief The inner half-edge of the face around which we want to circulate. */
- HalfEdgeIndex idx_inner_half_edge_;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Circulates clockwise around a face and returns an index to the inner
+ * half-edge (the target). The best way to declare the circulator is to use the method
+ * pcl::geometry::MeshBase::getInnerHalfEdgeAroundFaceCirculator ().
+ * \tparam MeshT Mesh to which this circulator belongs.
+ * \note The circulator can't be used to change the connectivity in the mesh (only
+ * const circulators are valid).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+class InnerHalfEdgeAroundFaceCirculator
+: boost::equality_comparable<
+ pcl::geometry::InnerHalfEdgeAroundFaceCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::InnerHalfEdgeAroundFaceCirculator<MeshT>>> {
+public:
+ using Base = boost::equality_comparable<
+ pcl::geometry::InnerHalfEdgeAroundFaceCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::InnerHalfEdgeAroundFaceCirculator<MeshT>>>;
+ using Self = pcl::geometry::InnerHalfEdgeAroundFaceCirculator<MeshT>;
+
+ using Mesh = MeshT;
+ using FaceIndex = typename Mesh::FaceIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+
+ /** \brief Constructor resulting in an invalid circulator. */
+ InnerHalfEdgeAroundFaceCirculator() : mesh_(nullptr), idx_inner_half_edge_() {}
+
+ /** \brief Construct from the face around which we want to circulate. */
+ InnerHalfEdgeAroundFaceCirculator(const FaceIndex& idx_face, Mesh* const mesh)
+ : mesh_(mesh), idx_inner_half_edge_(mesh->getInnerHalfEdgeIndex(idx_face))
+ {}
+
+ /** \brief Construct directly from the inner half-edge. */
+ InnerHalfEdgeAroundFaceCirculator(const HalfEdgeIndex& idx_inner_half_edge,
+ Mesh* const mesh)
+ : mesh_(mesh), idx_inner_half_edge_(idx_inner_half_edge)
+ {}
+
+ /** \brief Check if the circulator is valid.
+ * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure
+ * this yourself when constructing the circulator. */
+ inline bool
+ isValid() const
+ {
+ return (idx_inner_half_edge_.isValid());
+ }
+
+ /** \brief Comparison operators (with boost::operators): == !=
+ * \warning Does NOT check if the circulators belong to the same mesh. Please check
+ * this yourself. */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (idx_inner_half_edge_ == other.idx_inner_half_edge_);
+ }
+
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ idx_inner_half_edge_ = mesh_->getNextHalfEdgeIndex(idx_inner_half_edge_);
+ return (*this);
+ }
+
+ /** \brief Decrement operators (with boost::operators): -- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ idx_inner_half_edge_ = mesh_->getPrevHalfEdgeIndex(idx_inner_half_edge_);
+ return (*this);
+ }
+
+ /** \brief Get the index to the inner half-edge. */
+ inline HalfEdgeIndex
+ getTargetIndex() const
+ {
+ return (idx_inner_half_edge_);
+ }
+
+ /** \brief Get the half-edge that is currently stored in the circulator. */
+ inline HalfEdgeIndex
+ getCurrentHalfEdgeIndex() const
+ {
+ return (idx_inner_half_edge_);
+ }
+
+  /** \brief The mesh to which this circulator belongs. */
+ Mesh* mesh_;
+
+ /** \brief The inner half-edge of the face around which we want to circulate. */
+ HalfEdgeIndex idx_inner_half_edge_;
+};
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// OuterHalfEdgeAroundFaceCirculator
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Circulates clockwise around a face and returns an index to the outer half-edge (the target). The best way to declare the circulator is to use the method pcl::geometry::MeshBase::getOuterHalfEdgeAroundFaceCirculator ().
- * \tparam MeshT Mesh to which this circulator belongs to.
- * \note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT>
- class OuterHalfEdgeAroundFaceCirculator
- : boost::equality_comparable <pcl::geometry::OuterHalfEdgeAroundFaceCirculator <MeshT>
- , boost::unit_steppable <pcl::geometry::OuterHalfEdgeAroundFaceCirculator <MeshT>
- > >
- {
- public:
-
- using Base = boost::equality_comparable <pcl::geometry::OuterHalfEdgeAroundFaceCirculator <MeshT>,
- boost::unit_steppable <pcl::geometry::OuterHalfEdgeAroundFaceCirculator <MeshT> > >;
- using Self = pcl::geometry::OuterHalfEdgeAroundFaceCirculator<MeshT>;
-
- using Mesh = MeshT;
- using FaceIndex = typename Mesh::FaceIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
-
- /** \brief Constructor resulting in an invalid circulator. */
- OuterHalfEdgeAroundFaceCirculator ()
- : mesh_ (nullptr),
- idx_inner_half_edge_ ()
- {
- }
-
- /** \brief Construct from the face around which we want to circulate. */
- OuterHalfEdgeAroundFaceCirculator (const FaceIndex& idx_face,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_inner_half_edge_ (mesh->getInnerHalfEdgeIndex (idx_face))
- {
- }
-
- /** \brief Construct directly from the inner half-edge. */
- OuterHalfEdgeAroundFaceCirculator (const HalfEdgeIndex& idx_inner_half_edge,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_inner_half_edge_ (idx_inner_half_edge)
- {
- }
-
- /** \brief Check if the circulator is valid.
- * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */
- inline bool
- isValid () const
- {
- return (idx_inner_half_edge_.isValid ());
- }
-
- /** \brief Comparison operators (with boost::operators): == !=
- * \warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */
- inline bool
- operator == (const Self& other) const
- {
- return (idx_inner_half_edge_ == other.idx_inner_half_edge_);
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- idx_inner_half_edge_ = mesh_->getNextHalfEdgeIndex (idx_inner_half_edge_);
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): -- (pre and post) */
- inline Self&
- operator -- ()
- {
- idx_inner_half_edge_ = mesh_->getPrevHalfEdgeIndex (idx_inner_half_edge_);
- return (*this);
- }
-
- /** \brief Get the index to the outer half-edge. */
- inline HalfEdgeIndex
- getTargetIndex () const
- {
- return (mesh_->getOppositeHalfEdgeIndex (idx_inner_half_edge_));
- }
-
- /** \brief Get the half-edge that is currently stored in the circulator. */
- inline HalfEdgeIndex
- getCurrentHalfEdgeIndex () const
- {
- return (idx_inner_half_edge_);
- }
-
- /** \brief The mesh to which this circulator belongs to. */
- Mesh* mesh_;
-
- /** \brief The inner half-edge of the face around which we want to circulate. */
- HalfEdgeIndex idx_inner_half_edge_;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Circulates clockwise around a face and returns an index to the outer
+ * half-edge (the target). The best way to declare the circulator is to use the method
+ * pcl::geometry::MeshBase::getOuterHalfEdgeAroundFaceCirculator ().
+ * \tparam MeshT Mesh to which this circulator belongs.
+ * \note The circulator can't be used to change the connectivity in the mesh (only
+ * const circulators are valid).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+class OuterHalfEdgeAroundFaceCirculator
+: boost::equality_comparable<
+ pcl::geometry::OuterHalfEdgeAroundFaceCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::OuterHalfEdgeAroundFaceCirculator<MeshT>>> {
+public:
+ using Base = boost::equality_comparable<
+ pcl::geometry::OuterHalfEdgeAroundFaceCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::OuterHalfEdgeAroundFaceCirculator<MeshT>>>;
+ using Self = pcl::geometry::OuterHalfEdgeAroundFaceCirculator<MeshT>;
+
+ using Mesh = MeshT;
+ using FaceIndex = typename Mesh::FaceIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+
+ /** \brief Constructor resulting in an invalid circulator. */
+ OuterHalfEdgeAroundFaceCirculator() : mesh_(nullptr), idx_inner_half_edge_() {}
+
+ /** \brief Construct from the face around which we want to circulate. */
+ OuterHalfEdgeAroundFaceCirculator(const FaceIndex& idx_face, Mesh* const mesh)
+ : mesh_(mesh), idx_inner_half_edge_(mesh->getInnerHalfEdgeIndex(idx_face))
+ {}
+
+ /** \brief Construct directly from the inner half-edge. */
+ OuterHalfEdgeAroundFaceCirculator(const HalfEdgeIndex& idx_inner_half_edge,
+ Mesh* const mesh)
+ : mesh_(mesh), idx_inner_half_edge_(idx_inner_half_edge)
+ {}
+
+ /** \brief Check if the circulator is valid.
+ * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure
+ * this yourself when constructing the circulator. */
+ inline bool
+ isValid() const
+ {
+ return (idx_inner_half_edge_.isValid());
+ }
+
+ /** \brief Comparison operators (with boost::operators): == !=
+ * \warning Does NOT check if the circulators belong to the same mesh. Please check
+ * this yourself. */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (idx_inner_half_edge_ == other.idx_inner_half_edge_);
+ }
+
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ idx_inner_half_edge_ = mesh_->getNextHalfEdgeIndex(idx_inner_half_edge_);
+ return (*this);
+ }
+
+ /** \brief Decrement operators (with boost::operators): -- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ idx_inner_half_edge_ = mesh_->getPrevHalfEdgeIndex(idx_inner_half_edge_);
+ return (*this);
+ }
+
+ /** \brief Get the index to the outer half-edge. */
+ inline HalfEdgeIndex
+ getTargetIndex() const
+ {
+ return (mesh_->getOppositeHalfEdgeIndex(idx_inner_half_edge_));
+ }
+
+ /** \brief Get the half-edge that is currently stored in the circulator. */
+ inline HalfEdgeIndex
+ getCurrentHalfEdgeIndex() const
+ {
+ return (idx_inner_half_edge_);
+ }
+
+  /** \brief The mesh to which this circulator belongs. */
+ Mesh* mesh_;
+
+ /** \brief The inner half-edge of the face around which we want to circulate. */
+ HalfEdgeIndex idx_inner_half_edge_;
+};
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// FaceAroundFaceCirculator
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief Circulates clockwise around a face and returns an index to the face of the outer half-edge (the target). The best way to declare the circulator is to use the method pcl::geometry::MeshBase::getFaceAroundFaceCirculator ().
- * \tparam MeshT Mesh to which this circulator belongs to.
- * \note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshT>
- class FaceAroundFaceCirculator
- : boost::equality_comparable <pcl::geometry::FaceAroundFaceCirculator <MeshT>
- , boost::unit_steppable <pcl::geometry::FaceAroundFaceCirculator <MeshT>
- > >
- {
- public:
-
- using Base = boost::equality_comparable <pcl::geometry::FaceAroundFaceCirculator <MeshT>,
- boost::unit_steppable <pcl::geometry::FaceAroundFaceCirculator <MeshT> > >;
- using Self = pcl::geometry::FaceAroundFaceCirculator<MeshT>;
-
- using Mesh = MeshT;
- using FaceIndex = typename Mesh::FaceIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
-
- /** \brief Constructor resulting in an invalid circulator. */
- FaceAroundFaceCirculator ()
- : mesh_ (nullptr),
- idx_inner_half_edge_ ()
- {
- }
-
- /** \brief Construct from the face around which we want to circulate. */
- FaceAroundFaceCirculator (const FaceIndex& idx_face,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_inner_half_edge_ (mesh->getInnerHalfEdgeIndex (idx_face))
- {
- }
-
- /** \brief Construct directly from the inner half-edge. */
- FaceAroundFaceCirculator (const HalfEdgeIndex& idx_inner_half_edge,
- Mesh*const mesh)
- : mesh_ (mesh),
- idx_inner_half_edge_ (idx_inner_half_edge)
- {
- }
-
- /** \brief Check if the circulator is valid.
- * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */
- inline bool
- isValid () const
- {
- return (idx_inner_half_edge_.isValid ());
- }
-
- /** \brief Comparison operators (with boost::operators): == !=
- * \warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */
- inline bool
- operator == (const Self& other) const
- {
- return (idx_inner_half_edge_ == other.idx_inner_half_edge_);
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- idx_inner_half_edge_ = mesh_->getNextHalfEdgeIndex (idx_inner_half_edge_);
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): -- (pre and post) */
- inline Self&
- operator -- ()
- {
- idx_inner_half_edge_ = mesh_->getPrevHalfEdgeIndex (idx_inner_half_edge_);
- return (*this);
- }
-
- /** \brief Get the index to the target face. */
- inline FaceIndex
- getTargetIndex () const
- {
- return (mesh_->getOppositeFaceIndex (idx_inner_half_edge_));
- }
-
- /** \brief Get the half-edge that is currently stored in the circulator. */
- inline HalfEdgeIndex
- getCurrentHalfEdgeIndex () const
- {
- return (idx_inner_half_edge_);
- }
-
- /** \brief The mesh to which this circulator belongs to. */
- Mesh* mesh_;
-
- /** \brief The inner half-edge of the face around which we want to circulate. */
- HalfEdgeIndex idx_inner_half_edge_;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Circulates clockwise around a face and returns an index to the face of the
+ * outer half-edge (the target). The best way to declare the circulator is to use the
+ * method pcl::geometry::MeshBase::getFaceAroundFaceCirculator ().
+ * \tparam MeshT Mesh to which this circulator belongs.
+ * \note The circulator can't be used to change the connectivity in the mesh (only
+ * const circulators are valid).
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshT>
+class FaceAroundFaceCirculator
+: boost::equality_comparable<
+ pcl::geometry::FaceAroundFaceCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::FaceAroundFaceCirculator<MeshT>>> {
+public:
+ using Base = boost::equality_comparable<
+ pcl::geometry::FaceAroundFaceCirculator<MeshT>,
+ boost::unit_steppable<pcl::geometry::FaceAroundFaceCirculator<MeshT>>>;
+ using Self = pcl::geometry::FaceAroundFaceCirculator<MeshT>;
+
+ using Mesh = MeshT;
+ using FaceIndex = typename Mesh::FaceIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+
+ /** \brief Constructor resulting in an invalid circulator. */
+ FaceAroundFaceCirculator() : mesh_(nullptr), idx_inner_half_edge_() {}
+
+ /** \brief Construct from the face around which we want to circulate. */
+ FaceAroundFaceCirculator(const FaceIndex& idx_face, Mesh* const mesh)
+ : mesh_(mesh), idx_inner_half_edge_(mesh->getInnerHalfEdgeIndex(idx_face))
+ {}
+
+ /** \brief Construct directly from the inner half-edge. */
+ FaceAroundFaceCirculator(const HalfEdgeIndex& idx_inner_half_edge, Mesh* const mesh)
+ : mesh_(mesh), idx_inner_half_edge_(idx_inner_half_edge)
+ {}
+
+ /** \brief Check if the circulator is valid.
+ * \warning Does NOT check if the stored mesh pointer is valid. You have to ensure
+ * this yourself when constructing the circulator. */
+ inline bool
+ isValid() const
+ {
+ return (idx_inner_half_edge_.isValid());
+ }
+
+ /** \brief Comparison operators (with boost::operators): == !=
+ * \warning Does NOT check if the circulators belong to the same mesh. Please check
+ * this yourself. */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (idx_inner_half_edge_ == other.idx_inner_half_edge_);
+ }
+
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ idx_inner_half_edge_ = mesh_->getNextHalfEdgeIndex(idx_inner_half_edge_);
+ return (*this);
+ }
+
+ /** \brief Decrement operators (with boost::operators): -- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ idx_inner_half_edge_ = mesh_->getPrevHalfEdgeIndex(idx_inner_half_edge_);
+ return (*this);
+ }
+
+ /** \brief Get the index to the target face. */
+ inline FaceIndex
+ getTargetIndex() const
+ {
+ return (mesh_->getOppositeFaceIndex(idx_inner_half_edge_));
+ }
+
+ /** \brief Get the half-edge that is currently stored in the circulator. */
+ inline HalfEdgeIndex
+ getCurrentHalfEdgeIndex() const
+ {
+ return (idx_inner_half_edge_);
+ }
+
+  /** \brief The mesh to which this circulator belongs. */
+ Mesh* mesh_;
+
+ /** \brief The inner half-edge of the face around which we want to circulate. */
+ HalfEdgeIndex idx_inner_half_edge_;
+};
+} // End namespace geometry
} // End namespace pcl
#include <pcl/PolygonMesh.h>
#include <pcl/conversions.h>
-namespace pcl
+namespace pcl {
+namespace geometry {
+/** \brief Convert a half-edge mesh to a face-vertex mesh.
+ * \param[in] half_edge_mesh The input mesh.
+ * \param[out] face_vertex_mesh The output mesh.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class HalfEdgeMeshT>
+void
+toFaceVertexMesh(const HalfEdgeMeshT& half_edge_mesh,
+ pcl::PolygonMesh& face_vertex_mesh)
{
- namespace geometry
- {
- /** \brief Convert a half-edge mesh to a face-vertex mesh.
- * \param[in] half_edge_mesh The input mesh.
- * \param[out] face_vertex_mesh The output mesh.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class HalfEdgeMeshT> void
- toFaceVertexMesh (const HalfEdgeMeshT& half_edge_mesh, pcl::PolygonMesh& face_vertex_mesh)
- {
- using HalfEdgeMesh = HalfEdgeMeshT;
- using VAFC = typename HalfEdgeMesh::VertexAroundFaceCirculator;
- using FaceIndex = typename HalfEdgeMesh::FaceIndex;
-
- pcl::Vertices polygon;
- pcl::toPCLPointCloud2 (half_edge_mesh.getVertexDataCloud (), face_vertex_mesh.cloud);
+ using HalfEdgeMesh = HalfEdgeMeshT;
+ using VAFC = typename HalfEdgeMesh::VertexAroundFaceCirculator;
+ using FaceIndex = typename HalfEdgeMesh::FaceIndex;
- face_vertex_mesh.polygons.reserve (half_edge_mesh.sizeFaces ());
- for (std::size_t i=0; i<half_edge_mesh.sizeFaces (); ++i)
- {
- VAFC circ = half_edge_mesh.getVertexAroundFaceCirculator (FaceIndex (i));
- const VAFC circ_end = circ;
- polygon.vertices.clear ();
- do
- {
- polygon.vertices.push_back (circ.getTargetIndex ().get ());
- } while (++circ != circ_end);
- face_vertex_mesh.polygons.push_back (polygon);
- }
- }
+ pcl::Vertices polygon;
+ pcl::toPCLPointCloud2(half_edge_mesh.getVertexDataCloud(), face_vertex_mesh.cloud);
- /** \brief Convert a face-vertex mesh to a half-edge mesh.
- * \param[in] face_vertex_mesh The input mesh.
- * \param[out] half_edge_mesh The output mesh. It must have data associated with the vertices.
- * \return The number of faces that could NOT be added to the half-edge mesh.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class HalfEdgeMeshT> int
- toHalfEdgeMesh (const pcl::PolygonMesh& face_vertex_mesh, HalfEdgeMeshT& half_edge_mesh)
- {
- using HalfEdgeMesh = HalfEdgeMeshT;
- using VertexDataCloud = typename HalfEdgeMesh::VertexDataCloud;
- using VertexIndices = typename HalfEdgeMesh::VertexIndices;
+ face_vertex_mesh.polygons.reserve(half_edge_mesh.sizeFaces());
+ for (std::size_t i = 0; i < half_edge_mesh.sizeFaces(); ++i) {
+ VAFC circ = half_edge_mesh.getVertexAroundFaceCirculator(FaceIndex(i));
+ const VAFC circ_end = circ;
+ polygon.vertices.clear();
+ do {
+ polygon.vertices.push_back(circ.getTargetIndex().get());
+ } while (++circ != circ_end);
+ face_vertex_mesh.polygons.push_back(polygon);
+ }
+}
- static_assert (HalfEdgeMesh::HasVertexData::value, "Output mesh must have data associated with the vertices!");
+/** \brief Convert a face-vertex mesh to a half-edge mesh.
+ * \param[in] face_vertex_mesh The input mesh.
+ * \param[out] half_edge_mesh The output mesh. It must have data associated with the
+ * vertices.
+ * \return The number of faces that could NOT be added to the half-edge mesh.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class HalfEdgeMeshT>
+int
+toHalfEdgeMesh(const pcl::PolygonMesh& face_vertex_mesh, HalfEdgeMeshT& half_edge_mesh)
+{
+ using HalfEdgeMesh = HalfEdgeMeshT;
+ using VertexDataCloud = typename HalfEdgeMesh::VertexDataCloud;
+ using VertexIndices = typename HalfEdgeMesh::VertexIndices;
- VertexDataCloud vertices;
- pcl::fromPCLPointCloud2 (face_vertex_mesh.cloud, vertices);
+ static_assert(HalfEdgeMesh::HasVertexData::value,
+ "Output mesh must have data associated with the vertices!");
- half_edge_mesh.reserveVertices (vertices.size ());
- half_edge_mesh.reserveEdges (3 * face_vertex_mesh.polygons.size ());
- half_edge_mesh.reserveFaces ( face_vertex_mesh.polygons.size ());
+ VertexDataCloud vertices;
+ pcl::fromPCLPointCloud2(face_vertex_mesh.cloud, vertices);
- for (const auto &vertex : vertices)
- {
- half_edge_mesh.addVertex (vertex);
- }
+ half_edge_mesh.reserveVertices(vertices.size());
+ half_edge_mesh.reserveEdges(3 * face_vertex_mesh.polygons.size());
+ half_edge_mesh.reserveFaces(face_vertex_mesh.polygons.size());
- assert (half_edge_mesh.sizeVertices () == vertices.size ());
+ for (const auto& vertex : vertices) {
+ half_edge_mesh.addVertex(vertex);
+ }
- int count_not_added = 0;
- VertexIndices vi;
- vi.reserve (3); // Minimum number (triangle)
- for (const auto &polygon : face_vertex_mesh.polygons)
- {
- vi.clear ();
- for (const unsigned int &vertex : polygon.vertices)
- {
- vi.emplace_back (vertex);
- }
+ assert(half_edge_mesh.sizeVertices() == vertices.size());
- if (!half_edge_mesh.addFace (vi).isValid ())
- {
- ++count_not_added;
- }
- }
+ int count_not_added = 0;
+ VertexIndices vi;
+ vi.reserve(3); // Minimum number (triangle)
+ for (const auto& polygon : face_vertex_mesh.polygons) {
+ vi.clear();
+ for (const auto& vertex : polygon.vertices) {
+ vi.emplace_back(vertex);
+ }
- return (count_not_added);
+ if (!half_edge_mesh.addFace(vi).isValid()) {
+ ++count_not_added;
}
- } // End namespace geometry
+ }
+
+ return (count_not_added);
+}
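+// Usage sketch, assuming `MyMesh` is a half-edge mesh type whose traits provide vertex
+// data (e.g. pcl::geometry::PolygonMesh<pcl::geometry::DefaultMeshTraits<pcl::PointXYZ>>):
+//
+//   MyMesh half_edge_mesh;              // ... filled elsewhere
+//   pcl::PolygonMesh face_vertex_mesh;
+//   pcl::geometry::toFaceVertexMesh (half_edge_mesh, face_vertex_mesh);
+//
+//   MyMesh rebuilt_mesh;
+//   const int n_skipped = pcl::geometry::toHalfEdgeMesh (face_vertex_mesh, rebuilt_mesh);
+//   // n_skipped counts the polygons that could not be added back, e.g. because they
+//   // would violate the manifoldness requirements of the half-edge mesh.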
+} // End namespace geometry
} // End namespace pcl
#include <pcl/geometry/mesh_indices.h>
-namespace pcl
-{
- namespace geometry
- {
- template <class DerivedT, class MeshTraitsT, class MeshTagT>
- class MeshBase;
-
- template <class MeshT>
- class MeshIO;
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+template <class DerivedT, class MeshTraitsT, class MeshTagT>
+class MeshBase;
+
+template <class MeshT>
+class MeshIO;
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// Vertex
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief A vertex is a node in the mesh.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- class Vertex
- {
- private:
-
- using HalfEdgeIndex = pcl::geometry::HalfEdgeIndex;
-
- /** \brief Constructor.
- * \param[in] idx_outgoing_half_edge Index to the outgoing half-edge. Defaults to an invalid index.
- */
- explicit Vertex (const HalfEdgeIndex& idx_outgoing_half_edge = HalfEdgeIndex ())
- : idx_outgoing_half_edge_ (idx_outgoing_half_edge)
- {}
-
- /** \brief Index to the outgoing half-edge. The vertex is considered to be deleted if it stores an invalid outgoing half-edge index. */
- HalfEdgeIndex idx_outgoing_half_edge_;
-
- template <class DerivedT, class MeshTraitsT, class MeshTagT>
- friend class pcl::geometry::MeshBase;
-
- template <class MeshT>
- friend class pcl::geometry::MeshIO;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief A vertex is a node in the mesh.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+class Vertex {
+private:
+ using HalfEdgeIndex = pcl::geometry::HalfEdgeIndex;
+
+ /** \brief Constructor.
+ * \param[in] idx_outgoing_half_edge Index to the outgoing half-edge. Defaults to an
+ * invalid index.
+ */
+ explicit Vertex(const HalfEdgeIndex& idx_outgoing_half_edge = HalfEdgeIndex())
+ : idx_outgoing_half_edge_(idx_outgoing_half_edge)
+ {}
+
+ /** \brief Index to the outgoing half-edge. The vertex is considered to be deleted if
+ * it stores an invalid outgoing half-edge index. */
+ HalfEdgeIndex idx_outgoing_half_edge_;
+
+ template <class DerivedT, class MeshTraitsT, class MeshTagT>
+ friend class pcl::geometry::MeshBase;
+
+ template <class MeshT>
+ friend class pcl::geometry::MeshIO;
+};
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// HalfEdge
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief An edge is a connection between two vertices. In a half-edge mesh the edge is split into two half-edges with opposite orientation. Each half-edge stores the index to the terminating vertex, the next half-edge, the previous half-edge and the face it belongs to. The opposite half-edge is accessed implicitly.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- class HalfEdge
- {
- private:
-
- using VertexIndex = pcl::geometry::VertexIndex;
- using HalfEdgeIndex = pcl::geometry::HalfEdgeIndex;
- using FaceIndex = pcl::geometry::FaceIndex;
-
- /** \brief Constructor.
- * \param[in] idx_terminating_vertex Index to the terminating vertex. Defaults to an invalid index.
- * \param[in] idx_next_half_edge Index to the next half-edge. Defaults to an invalid index.
- * \param[in] idx_prev_half_edge Index to the previous half-edge. Defaults to an invalid index.
- * \param[in] idx_face Index to the face. Defaults to an invalid index.
- */
- explicit HalfEdge (const VertexIndex& idx_terminating_vertex = VertexIndex (),
- const HalfEdgeIndex& idx_next_half_edge = HalfEdgeIndex (),
- const HalfEdgeIndex& idx_prev_half_edge = HalfEdgeIndex (),
- const FaceIndex& idx_face = FaceIndex ())
- : idx_terminating_vertex_ (idx_terminating_vertex),
- idx_next_half_edge_ (idx_next_half_edge),
- idx_prev_half_edge_ (idx_prev_half_edge),
- idx_face_ (idx_face)
- {
- }
-
- /** \brief Index to the terminating vertex. The half-edge is considered to be deleted if it stores an invalid terminating vertex index. */
- VertexIndex idx_terminating_vertex_;
-
- /** \brief Index to the next half-edge. */
- HalfEdgeIndex idx_next_half_edge_;
-
- /** \brief Index to the previous half-edge. */
- HalfEdgeIndex idx_prev_half_edge_;
-
- /** \brief Index to the face. The half-edge is considered to be on the boundary if it stores an invalid face index. */
- FaceIndex idx_face_;
-
- template <class DerivedT, class MeshTraitsT, class MeshTagT>
- friend class pcl::geometry::MeshBase;
-
- template <class MeshT>
- friend class pcl::geometry::MeshIO;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief An edge is a connection between two vertices. In a half-edge mesh the edge is
+ * split into two half-edges with opposite orientation. Each half-edge stores the index
+ * to the terminating vertex, the next half-edge, the previous half-edge and the face it
+ * belongs to. The opposite half-edge is accessed implicitly.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+class HalfEdge {
+private:
+ using VertexIndex = pcl::geometry::VertexIndex;
+ using HalfEdgeIndex = pcl::geometry::HalfEdgeIndex;
+ using FaceIndex = pcl::geometry::FaceIndex;
+
+ /** \brief Constructor.
+ * \param[in] idx_terminating_vertex Index to the terminating vertex. Defaults to an
+ * invalid index.
+   * \param[in] idx_next_half_edge Index to the next half-edge. Defaults to an invalid
+   * index.
+   * \param[in] idx_prev_half_edge Index to the previous half-edge. Defaults to an
+   * invalid index.
+   * \param[in] idx_face Index to the face. Defaults to an invalid index.
+ */
+ explicit HalfEdge(const VertexIndex& idx_terminating_vertex = VertexIndex(),
+ const HalfEdgeIndex& idx_next_half_edge = HalfEdgeIndex(),
+ const HalfEdgeIndex& idx_prev_half_edge = HalfEdgeIndex(),
+ const FaceIndex& idx_face = FaceIndex())
+ : idx_terminating_vertex_(idx_terminating_vertex)
+ , idx_next_half_edge_(idx_next_half_edge)
+ , idx_prev_half_edge_(idx_prev_half_edge)
+ , idx_face_(idx_face)
+ {}
+
+ /** \brief Index to the terminating vertex. The half-edge is considered to be deleted
+ * if it stores an invalid terminating vertex index. */
+ VertexIndex idx_terminating_vertex_;
+
+ /** \brief Index to the next half-edge. */
+ HalfEdgeIndex idx_next_half_edge_;
+
+ /** \brief Index to the previous half-edge. */
+ HalfEdgeIndex idx_prev_half_edge_;
+
+ /** \brief Index to the face. The half-edge is considered to be on the boundary if it
+ * stores an invalid face index. */
+ FaceIndex idx_face_;
+
+ template <class DerivedT, class MeshTraitsT, class MeshTagT>
+ friend class pcl::geometry::MeshBase;
+
+ template <class MeshT>
+ friend class pcl::geometry::MeshIO;
+};
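+// Connectivity sketch for a single triangle with vertices v0, v1, v2: the mesh stores
+// three inner half-edges h0: v0->v1, h1: v1->v2 and h2: v2->v0, each referencing the
+// triangle as its face and chained via next/prev (next(h0) = h1, prev(h0) = h2, ...).
+// Their opposite half-edges run in the reverse direction and store an invalid
+// FaceIndex, which marks them as boundary half-edges.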
+} // End namespace geometry
} // End namespace pcl
////////////////////////////////////////////////////////////////////////////////
// Face
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
- {
- /** \brief A face is a closed loop of edges.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- class Face
- {
- private:
-
- using HalfEdgeIndex = pcl::geometry::HalfEdgeIndex;
-
- /** \brief Constructor.
- * \param[in] inner_half_edge_idx Index to the outgoing half-edge. Defaults to an invalid index
- */
- explicit Face (const HalfEdgeIndex& idx_inner_half_edge = HalfEdgeIndex ())
- : idx_inner_half_edge_ (idx_inner_half_edge)
- {}
-
- /** \brief Index to the inner half-edge. The face is considered to be deleted if it stores an invalid inner half-edge index. */
- HalfEdgeIndex idx_inner_half_edge_;
-
- template <class DerivedT, class MeshTraitsT, class MeshTagT>
- friend class pcl::geometry::MeshBase;
-
- template <class MeshT>
- friend class pcl::geometry::MeshIO;
- };
- } // End namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief A face is a closed loop of edges.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+class Face {
+private:
+ using HalfEdgeIndex = pcl::geometry::HalfEdgeIndex;
+
+ /** \brief Constructor.
+   * \param[in] idx_inner_half_edge Index to the inner half-edge. Defaults to an
+   * invalid index.
+ */
+ explicit Face(const HalfEdgeIndex& idx_inner_half_edge = HalfEdgeIndex())
+ : idx_inner_half_edge_(idx_inner_half_edge)
+ {}
+
+ /** \brief Index to the inner half-edge. The face is considered to be deleted if it
+ * stores an invalid inner half-edge index. */
+ HalfEdgeIndex idx_inner_half_edge_;
+
+ template <class DerivedT, class MeshTraitsT, class MeshTagT>
+ friend class pcl::geometry::MeshBase;
+
+ template <class MeshT>
+ friend class pcl::geometry::MeshIO;
+};
+} // End namespace geometry
} // End namespace pcl
*
*/
-// NOTE: This file has been created with 'pcl_src/geometry/include/pcl/geometry/mesh_indices.py'
-
#pragma once
-#include <iostream>
+#include <boost/operators.hpp>
-#include <pcl/geometry/boost.h>
+#include <iostream>
////////////////////////////////////////////////////////////////////////////////
-// VertexIndex
+// MeshIndex
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- namespace geometry
+namespace pcl {
+namespace detail {
+
+template <class IndexTagT>
+class MeshIndex;
+
+template <class IndexTagT>
+std::istream&
+operator>>(std::istream& is, MeshIndex<IndexTagT>&);
+
+template <class IndexTagT>
+class MeshIndex
+: boost::totally_ordered<
+ MeshIndex<IndexTagT>, // < > <= >= == !=
+ boost::unit_steppable<MeshIndex<IndexTagT>, // ++ -- (pre and post)
+ boost::additive<MeshIndex<IndexTagT> // += +
+ // -= -
+ >>> {
+
+public:
+ using Base = boost::totally_ordered<
+ MeshIndex<IndexTagT>,
+ boost::unit_steppable<MeshIndex<IndexTagT>,
+ boost::additive<MeshIndex<IndexTagT>>>>;
+ using Self = MeshIndex<IndexTagT>;
+
+ /** \brief Constructor. Initializes with an invalid index. */
+ MeshIndex() : index_(-1) {}
+
+ /** \brief Constructor.
+ * \param[in] index The integer index.
+ */
+ explicit MeshIndex(const int index) : index_(index) {}
+
+ /** \brief Returns true if the index is valid. */
+ inline bool
+ isValid() const
{
- /** \brief Index used to access elements in the half-edge mesh. It is basically just a wrapper around an integer with a few added methods.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- class VertexIndex
- : boost::totally_ordered <pcl::geometry::VertexIndex // < > <= >= == !=
- , boost::unit_steppable <pcl::geometry::VertexIndex // ++ -- (pre and post)
- , boost::additive <pcl::geometry::VertexIndex // += + -= -
- > > >
- {
- public:
-
- using Base = boost::totally_ordered <pcl::geometry::VertexIndex,
- boost::unit_steppable <pcl::geometry::VertexIndex,
- boost::additive <pcl::geometry::VertexIndex> > >;
- using Self = pcl::geometry::VertexIndex;
-
- /** \brief Constructor. Initializes with an invalid index. */
- VertexIndex ()
- : index_ (-1)
- {
- }
-
- /** \brief Constructor.
- * \param[in] index The integer index.
- */
- explicit VertexIndex (const int index)
- : index_ (index)
- {
- }
-
- /** \brief Returns true if the index is valid. */
- inline bool
- isValid () const
- {
- return (index_ >= 0);
- }
-
- /** \brief Invalidate the index. */
- inline void
- invalidate ()
- {
- index_ = -1;
- }
-
- /** \brief Get the index. */
- inline int
- get () const
- {
- return (index_);
- }
-
- /** \brief Set the index. */
- inline void
- set (const int index)
- {
- index_ = index;
- }
-
- /** \brief Comparison operators (with boost::operators): < > <= >= */
- inline bool
- operator < (const Self& other) const
- {
- return (this->get () < other.get ());
- }
-
- /** \brief Comparison operators (with boost::operators): == != */
- inline bool
- operator == (const Self& other) const
- {
- return (this->get () == other.get ());
- }
+ return (index_ >= 0);
+ }
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- ++index_;
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): \-\- (pre and post) */
- inline Self&
- operator -- ()
- {
- --index_;
- return (*this);
- }
-
- /** \brief Addition operators (with boost::operators): + += */
- inline Self&
- operator += (const Self& other)
- {
- index_ += other.get ();
- return (*this);
- }
-
- /** \brief Subtraction operators (with boost::operators): - -= */
- inline Self&
- operator -= (const Self& other)
- {
- index_ -= other.get ();
- return (*this);
- }
-
- private:
-
- /** \brief Stored index. */
- int index_;
-
- friend std::istream&
- operator >> (std::istream& is, pcl::geometry::VertexIndex& index);
- };
-
- /** \brief ostream operator. */
- inline std::ostream&
- operator << (std::ostream& os, const pcl::geometry::VertexIndex& index)
- {
- return (os << index.get ());
- }
-
- /** \brief istream operator. */
- inline std::istream&
- operator >> (std::istream& is, pcl::geometry::VertexIndex& index)
- {
- return (is >> index.index_);
- }
-
- } // End namespace geometry
-} // End namespace pcl
-
-////////////////////////////////////////////////////////////////////////////////
-// HalfEdgeIndex
-////////////////////////////////////////////////////////////////////////////////
-
-namespace pcl
-{
- namespace geometry
+ /** \brief Invalidate the index. */
+ inline void
+ invalidate()
{
- /** \brief Index used to access elements in the half-edge mesh. It is basically just a wrapper around an integer with a few added methods.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- class HalfEdgeIndex
- : boost::totally_ordered <pcl::geometry::HalfEdgeIndex // < > <= >= == !=
- , boost::unit_steppable <pcl::geometry::HalfEdgeIndex // ++ -- (pre and post)
- , boost::additive <pcl::geometry::HalfEdgeIndex // += + -= -
- > > >
- {
- public:
-
- using Base = boost::totally_ordered <pcl::geometry::HalfEdgeIndex,
- boost::unit_steppable <pcl::geometry::HalfEdgeIndex,
- boost::additive <pcl::geometry::HalfEdgeIndex> > >;
- using Self = pcl::geometry::HalfEdgeIndex;
-
- /** \brief Constructor. Initializes with an invalid index. */
- HalfEdgeIndex ()
- : index_ (-1)
- {
- }
-
- /** \brief Constructor.
- * \param[in] index The integer index.
- */
- explicit HalfEdgeIndex (const int index)
- : index_ (index)
- {
- }
-
- /** \brief Returns true if the index is valid. */
- inline bool
- isValid () const
- {
- return (index_ >= 0);
- }
-
- /** \brief Invalidate the index. */
- inline void
- invalidate ()
- {
- index_ = -1;
- }
-
- /** \brief Get the index. */
- inline int
- get () const
- {
- return (index_);
- }
-
- /** \brief Set the index. */
- inline void
- set (const int index)
- {
- index_ = index;
- }
-
- /** \brief Comparison operators (with boost::operators): < > <= >= */
- inline bool
- operator < (const Self& other) const
- {
- return (this->get () < other.get ());
- }
-
- /** \brief Comparison operators (with boost::operators): == != */
- inline bool
- operator == (const Self& other) const
- {
- return (this->get () == other.get ());
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- ++index_;
- return (*this);
- }
-
- /** \brief Decrement operators (with boost::operators): \-\- (pre and post) */
- inline Self&
- operator -- ()
- {
- --index_;
- return (*this);
- }
-
- /** \brief Addition operators (with boost::operators): + += */
- inline Self&
- operator += (const Self& other)
- {
- index_ += other.get ();
- return (*this);
- }
-
- /** \brief Subtraction operators (with boost::operators): - -= */
- inline Self&
- operator -= (const Self& other)
- {
- index_ -= other.get ();
- return (*this);
- }
-
- private:
-
- /** \brief Stored index. */
- int index_;
-
- friend std::istream&
- operator >> (std::istream& is, pcl::geometry::HalfEdgeIndex& index);
- };
-
- /** \brief ostream operator. */
- inline std::ostream&
- operator << (std::ostream& os, const pcl::geometry::HalfEdgeIndex& index)
- {
- return (os << index.get ());
- }
-
- /** \brief istream operator. */
- inline std::istream&
- operator >> (std::istream& is, pcl::geometry::HalfEdgeIndex& index)
- {
- return (is >> index.index_);
- }
-
- } // End namespace geometry
-} // End namespace pcl
-
-////////////////////////////////////////////////////////////////////////////////
-// EdgeIndex
-////////////////////////////////////////////////////////////////////////////////
+ index_ = -1;
+ }
-namespace pcl
-{
- namespace geometry
+ /** \brief Get the index. */
+ inline int
+ get() const
{
- /** \brief Index used to access elements in the half-edge mesh. It is basically just a wrapper around an integer with a few added methods.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- class EdgeIndex
- : boost::totally_ordered <pcl::geometry::EdgeIndex // < > <= >= == !=
- , boost::unit_steppable <pcl::geometry::EdgeIndex // ++ -- (pre and post)
- , boost::additive <pcl::geometry::EdgeIndex // += + -= -
- > > >
- {
- public:
-
- using Base = boost::totally_ordered <pcl::geometry::EdgeIndex,
- boost::unit_steppable <pcl::geometry::EdgeIndex,
- boost::additive <pcl::geometry::EdgeIndex> > >;
- using Self = pcl::geometry::EdgeIndex;
-
- /** \brief Constructor. Initializes with an invalid index. */
- EdgeIndex ()
- : index_ (-1)
- {
- }
-
- /** \brief Constructor.
- * \param[in] index The integer index.
- */
- explicit EdgeIndex (const int index)
- : index_ (index)
- {
- }
-
- /** \brief Returns true if the index is valid. */
- inline bool
- isValid () const
- {
- return (index_ >= 0);
- }
-
- /** \brief Invalidate the index. */
- inline void
- invalidate ()
- {
- index_ = -1;
- }
-
- /** \brief Get the index. */
- inline int
- get () const
- {
- return (index_);
- }
-
- /** \brief Set the index. */
- inline void
- set (const int index)
- {
- index_ = index;
- }
-
- /** \brief Comparison operators (with boost::operators): < > <= >= */
- inline bool
- operator < (const Self& other) const
- {
- return (this->get () < other.get ());
- }
-
- /** \brief Comparison operators (with boost::operators): == != */
- inline bool
- operator == (const Self& other) const
- {
- return (this->get () == other.get ());
- }
-
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- ++index_;
- return (*this);
- }
+ return (index_);
+ }
- /** \brief Decrement operators (with boost::operators): \-\- (pre and post) */
- inline Self&
- operator -- ()
- {
- --index_;
- return (*this);
- }
-
- /** \brief Addition operators (with boost::operators): + += */
- inline Self&
- operator += (const Self& other)
- {
- index_ += other.get ();
- return (*this);
- }
-
- /** \brief Subtraction operators (with boost::operators): - -= */
- inline Self&
- operator -= (const Self& other)
- {
- index_ -= other.get ();
- return (*this);
- }
-
- private:
-
- /** \brief Stored index. */
- int index_;
-
- friend std::istream&
- operator >> (std::istream& is, pcl::geometry::EdgeIndex& index);
- };
-
- /** \brief ostream operator. */
- inline std::ostream&
- operator << (std::ostream& os, const pcl::geometry::EdgeIndex& index)
- {
- return (os << index.get ());
- }
-
- /** \brief istream operator. */
- inline std::istream&
- operator >> (std::istream& is, pcl::geometry::EdgeIndex& index)
- {
- return (is >> index.index_);
- }
-
- } // End namespace geometry
-} // End namespace pcl
-
-////////////////////////////////////////////////////////////////////////////////
-// FaceIndex
-////////////////////////////////////////////////////////////////////////////////
-
-namespace pcl
-{
- namespace geometry
+ /** \brief Set the index. */
+ inline void
+ set(const int index)
{
- /** \brief Index used to access elements in the half-edge mesh. It is basically just a wrapper around an integer with a few added methods.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- class FaceIndex
- : boost::totally_ordered <pcl::geometry::FaceIndex // < > <= >= == !=
- , boost::unit_steppable <pcl::geometry::FaceIndex // ++ -- (pre and post)
- , boost::additive <pcl::geometry::FaceIndex // += + -= -
- > > >
- {
- public:
-
- using Base = boost::totally_ordered <pcl::geometry::FaceIndex,
- boost::unit_steppable <pcl::geometry::FaceIndex,
- boost::additive <pcl::geometry::FaceIndex> > >;
- using Self = pcl::geometry::FaceIndex;
-
- /** \brief Constructor. Initializes with an invalid index. */
- FaceIndex ()
- : index_ (-1)
- {
- }
-
- /** \brief Constructor.
- * \param[in] index The integer index.
- */
- explicit FaceIndex (const int index)
- : index_ (index)
- {
- }
-
- /** \brief Returns true if the index is valid. */
- inline bool
- isValid () const
- {
- return (index_ >= 0);
- }
+ index_ = index;
+ }
- /** \brief Invalidate the index. */
- inline void
- invalidate ()
- {
- index_ = -1;
- }
-
- /** \brief Get the index. */
- inline int
- get () const
- {
- return (index_);
- }
-
- /** \brief Set the index. */
- inline void
- set (const int index)
- {
- index_ = index;
- }
+ /** \brief Comparison operators (with boost::operators): < > <= >= */
+ inline bool
+ operator<(const Self& other) const
+ {
+ return (this->get() < other.get());
+ }
- /** \brief Comparison operators (with boost::operators): < > <= >= */
- inline bool
- operator < (const Self& other) const
- {
- return (this->get () < other.get ());
- }
+ /** \brief Comparison operators (with boost::operators): == != */
+ inline bool
+ operator==(const Self& other) const
+ {
+ return (this->get() == other.get());
+ }
- /** \brief Comparison operators (with boost::operators): == != */
- inline bool
- operator == (const Self& other) const
- {
- return (this->get () == other.get ());
- }
+ /** \brief Increment operators (with boost::operators): ++ (pre and post) */
+ inline Self&
+ operator++()
+ {
+ ++index_;
+ return (*this);
+ }
- /** \brief Increment operators (with boost::operators): ++ (pre and post) */
- inline Self&
- operator ++ ()
- {
- ++index_;
- return (*this);
- }
+ /** \brief Decrement operators (with boost::operators): \-\- (pre and post) */
+ inline Self&
+ operator--()
+ {
+ --index_;
+ return (*this);
+ }
- /** \brief Decrement operators (with boost::operators): \-\- (pre and post) */
- inline Self&
- operator -- ()
- {
- --index_;
- return (*this);
- }
+ /** \brief Addition operators (with boost::operators): + += */
+ inline Self&
+ operator+=(const Self& other)
+ {
+ index_ += other.get();
+ return (*this);
+ }
- /** \brief Addition operators (with boost::operators): + += */
- inline Self&
- operator += (const Self& other)
- {
- index_ += other.get ();
- return (*this);
- }
+ /** \brief Subtraction operators (with boost::operators): - -= */
+ inline Self&
+ operator-=(const Self& other)
+ {
+ index_ -= other.get();
+ return (*this);
+ }
- /** \brief Subtraction operators (with boost::operators): - -= */
- inline Self&
- operator -= (const Self& other)
- {
- index_ -= other.get ();
- return (*this);
- }
+private:
+ /** \brief Stored index. */
+ int index_;
- private:
+ friend std::istream& operator>><>(std::istream& is, MeshIndex<IndexTagT>& index);
+};
- /** \brief Stored index. */
- int index_;
+/** \brief ostream operator. */
+template <class IndexTagT>
+inline std::ostream&
+operator<<(std::ostream& os, const MeshIndex<IndexTagT>& index)
+{
+ return (os << index.get());
+}
- friend std::istream&
- operator >> (std::istream& is, pcl::geometry::FaceIndex& index);
- };
+/** \brief istream operator. */
+template <class IndexTagT>
+inline std::istream&
+operator>>(std::istream& is, MeshIndex<IndexTagT>& index)
+{
+ return (is >> index.index_);
+}
- /** \brief ostream operator. */
- inline std::ostream&
- operator << (std::ostream& os, const pcl::geometry::FaceIndex& index)
- {
- return (os << index.get ());
- }
+} // End namespace detail
+} // End namespace pcl
- /** \brief istream operator. */
- inline std::istream&
- operator >> (std::istream& is, pcl::geometry::FaceIndex& index)
- {
- return (is >> index.index_);
- }
+namespace pcl {
+namespace geometry {
+/** \brief Index used to access elements in the half-edge mesh. It is basically just a
+ * wrapper around an integer with a few added methods.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+using VertexIndex = pcl::detail::MeshIndex<struct VertexIndexTag>;
+/** \brief Index used to access elements in the half-edge mesh. It is basically just a
+ * wrapper around an integer with a few added methods.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+using HalfEdgeIndex = pcl::detail::MeshIndex<struct HalfEdgeIndexTag>;
+/** \brief Index used to access elements in the half-edge mesh. It is basically just a
+ * wrapper around an integer with a few added methods.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+using EdgeIndex = pcl::detail::MeshIndex<struct EdgeIndexTag>;
+/** \brief Index used to access elements in the half-edge mesh. It is basically just a
+ * wrapper around an integer with a few added methods.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+using FaceIndex = pcl::detail::MeshIndex<struct FaceIndexTag>;
- } // End namespace geometry
+} // End namespace geometry
} // End namespace pcl
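
// A minimal usage sketch of the index aliases above, assuming the refactored
// header is installed as <pcl/geometry/mesh_indices.h> (the file written by the
// generator script that is removed further down). The boost::operators bases
// derive the full comparison / increment / additive operator sets from the
// members defined in MeshIndex.
#include <pcl/geometry/mesh_indices.h>

#include <cassert>
#include <iostream>

int main()
{
  pcl::geometry::VertexIndex v;               // default-constructed: invalid (-1)
  assert(!v.isValid());

  v.set(0);
  ++v;                                        // unit_steppable: ++ / --
  v += pcl::geometry::VertexIndex(2);         // additive: += / + / -= / -
  assert(v.isValid() && v.get() == 3);

  // totally_ordered derives >, <=, >= and != from operator< and operator==.
  assert(pcl::geometry::VertexIndex(1) < pcl::geometry::VertexIndex(2));

  std::cout << v << '\n';                     // the ostream operator prints the raw integer
  return 0;
}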
////////////////////////////////////////////////////////////////////////////////
// Conversions
////////////////////////////////////////////////////////////////////////////////
-namespace pcl
+namespace pcl {
+namespace geometry {
+/** \brief Convert the given half-edge index to an edge index. */
+inline EdgeIndex
+toEdgeIndex(const HalfEdgeIndex& index)
{
- namespace geometry
- {
- /** \brief Convert the given half-edge index to an edge index. */
- inline pcl::geometry::EdgeIndex
- toEdgeIndex (const HalfEdgeIndex& index)
- {
- return (index.isValid () ? EdgeIndex (index.get () / 2) : EdgeIndex ());
- }
+ return (index.isValid() ? EdgeIndex(index.get() / 2) : EdgeIndex());
+}
- /** \brief Convert the given edge index to a half-edge index.
- * \param index
- * \param[in] get_first The first half-edge of the edge is returned if this variable is true; elsewise the second.
- */
- inline pcl::geometry::HalfEdgeIndex
- toHalfEdgeIndex (const EdgeIndex& index, const bool get_first=true)
- {
- return (index.isValid () ? HalfEdgeIndex (index.get () * 2 + static_cast <int> (!get_first)) : HalfEdgeIndex ());
- }
- } // End namespace geometry
+/** \brief Convert the given edge index to a half-edge index.
+ * \param[in] index The edge index to convert.
+ * \param[in] get_first The first half-edge of the edge is returned if this
+ * variable is true; otherwise the second.
+ */
+inline HalfEdgeIndex
+toHalfEdgeIndex(const EdgeIndex& index, const bool get_first = true)
+{
+ return (index.isValid()
+ ? HalfEdgeIndex(index.get() * 2 + static_cast<int>(!get_first))
+ : HalfEdgeIndex());
+}
+} // End namespace geometry
} // End namespace pcl
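
// A worked example of the conversions above, under the same assumed header path
// <pcl/geometry/mesh_indices.h>: edge i owns the half-edge pair {2*i, 2*i + 1},
// and invalid indices stay invalid through the conversion.
#include <pcl/geometry/mesh_indices.h>

#include <cassert>

int main()
{
  using pcl::geometry::EdgeIndex;
  using pcl::geometry::HalfEdgeIndex;

  const HalfEdgeIndex he(7);                                    // second half-edge of edge 3
  assert(pcl::geometry::toEdgeIndex(he).get() == 3);            // 7 / 2

  const EdgeIndex e(3);
  assert(pcl::geometry::toHalfEdgeIndex(e).get() == 6);         // first half-edge:  3 * 2 + 0
  assert(pcl::geometry::toHalfEdgeIndex(e, false).get() == 7);  // second half-edge: 3 * 2 + 1

  assert(!pcl::geometry::toEdgeIndex(HalfEdgeIndex()).isValid());
  return 0;
}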
+++ /dev/null
-##
-# Software License Agreement (BSD License)
-#
-# Point Cloud Library (PCL) - www.pointclouds.org
-# Copyright (c) 2009-2012, Willow Garage, Inc.
-# Copyright (c) 2012-, Open Perception, Inc.
-#
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# # Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# # Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# # Neither the name of the copyright holder(s) nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-
-import os
-
-filename = os.path.join (os.path.dirname (__file__), 'mesh_indices.h')
-class_names = ['VertexIndex', 'HalfEdgeIndex', 'EdgeIndex', 'FaceIndex']
-
-################################################################################
-
-f = open (filename, 'w')
-
-f.write ('/*\n')
-f.write (' * Software License Agreement (BSD License)\n')
-f.write (' *\n')
-f.write (' * Point Cloud Library (PCL) - www.pointclouds.org\n')
-f.write (' * Copyright (c) 2009-2012, Willow Garage, Inc.\n')
-f.write (' * Copyright (c) 2012-, Open Perception, Inc.\n')
-f.write (' *\n')
-f.write (' * All rights reserved.\n')
-f.write (' *\n')
-f.write (' * Redistribution and use in source and binary forms, with or without\n')
-f.write (' * modification, are permitted provided that the following conditions\n')
-f.write (' * are met:\n')
-f.write (' *\n')
-f.write (' * * Redistributions of source code must retain the above copyright\n')
-f.write (' * notice, this list of conditions and the following disclaimer.\n')
-f.write (' * * Redistributions in binary form must reproduce the above\n')
-f.write (' * copyright notice, this list of conditions and the following\n')
-f.write (' * disclaimer in the documentation and/or other materials provided\n')
-f.write (' * with the distribution.\n')
-f.write (' * * Neither the name of the copyright holder(s) nor the names of its\n')
-f.write (' * contributors may be used to endorse or promote products derived\n')
-f.write (' * from this software without specific prior written permission.\n')
-f.write (' *\n')
-f.write (' * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n')
-f.write (' * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n')
-f.write (' * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n')
-f.write (' * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n')
-f.write (' * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n')
-f.write (' * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n')
-f.write (' * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n')
-f.write (' * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n')
-f.write (' * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n')
-f.write (' * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n')
-f.write (' * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n')
-f.write (' * POSSIBILITY OF SUCH DAMAGE.\n')
-f.write (' *\n')
-f.write (' * $Id$\n')
-f.write (' *\n')
-f.write (' */\n\n')
-
-f.write ("// NOTE: This file has been created with 'pcl_src/geometry/include/pcl/geometry/mesh_indices.py'\n\n")
-
-f.write ('#ifndef PCL_GEOMETRY_MESH_INDICES_H\n')
-f.write ('#define PCL_GEOMETRY_MESH_INDICES_H\n\n')
-
-f.write ('#include <iostream>\n\n')
-
-f.write ('#include <pcl/geometry/boost.h>\n\n')
-
-for cn in class_names:
-
- f.write ('////////////////////////////////////////////////////////////////////////////////\n')
- f.write ('// ' + cn + '\n')
- f.write ('////////////////////////////////////////////////////////////////////////////////\n\n')
-
- f.write ('namespace pcl\n')
- f.write ('{\n')
- f.write (' namespace geometry\n')
- f.write (' {\n')
- f.write (' /** \\brief Index used to access elements in the half-edge mesh. It is basically just a wrapper around an integer with a few added methods.\n')
- f.write (' * \\author Martin Saelzle\n')
- f.write (' * \ingroup geometry\n')
- f.write (' */\n')
- f.write (' class ' + cn + '\n')
- f.write (' : boost::totally_ordered <pcl::geometry::' + cn + ' // < > <= >= == !=\n')
- f.write (' , boost::unit_steppable <pcl::geometry::' + cn + ' // ++ -- (pre and post)\n')
- f.write (' , boost::additive <pcl::geometry::' + cn + ' // += + -= -\n')
- f.write (' > > >\n')
- f.write (' {\n')
- f.write (' public:\n\n')
-
- f.write (' typedef boost::totally_ordered <pcl::geometry::' + cn + ',\n')
- f.write (' boost::unit_steppable <pcl::geometry::' + cn + ',\n')
- f.write (' boost::additive <pcl::geometry::' + cn + '> > > Base;\n')
- f.write (' typedef pcl::geometry::' + cn + ' Self;\n\n')
-
- f.write (' /** \\brief Constructor. Initializes with an invalid index. */\n')
- f.write (' ' + cn + ' ()\n')
- f.write (' : index_ (-1)\n')
- f.write (' {\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Constructor.\n')
- f.write (' * \param[in] index The integer index.\n')
- f.write (' */\n')
- f.write (' explicit ' + cn + ' (const int index)\n')
- f.write (' : index_ (index)\n')
- f.write (' {\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Returns true if the index is valid. */\n')
- f.write (' inline bool\n')
- f.write (' isValid () const\n')
- f.write (' {\n')
- f.write (' return (index_ >= 0);\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Invalidate the index. */\n')
- f.write (' inline void\n')
- f.write (' invalidate ()\n')
- f.write (' {\n')
- f.write (' index_ = -1;\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Get the index. */\n')
- f.write (' inline int\n')
- f.write (' get () const\n')
- f.write (' {\n')
- f.write (' return (index_);\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Set the index. */\n')
- f.write (' inline void\n')
- f.write (' set (const int index)\n')
- f.write (' {\n')
- f.write (' index_ = index;\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Comparison operators (with boost::operators): < > <= >= */\n')
- f.write (' inline bool\n')
- f.write (' operator < (const Self& other) const\n')
- f.write (' {\n')
- f.write (' return (this->get () < other.get ());\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Comparison operators (with boost::operators): == != */\n')
- f.write (' inline bool\n')
- f.write (' operator == (const Self& other) const\n')
- f.write (' {\n')
- f.write (' return (this->get () == other.get ());\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Increment operators (with boost::operators): ++ (pre and post) */\n')
- f.write (' inline Self&\n')
- f.write (' operator ++ ()\n')
- f.write (' {\n')
- f.write (' ++index_;\n')
- f.write (' return (*this);\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Decrement operators (with boost::operators): \-\- (pre and post) */\n')
- f.write (' inline Self&\n')
- f.write (' operator -- ()\n')
- f.write (' {\n')
- f.write (' --index_;\n')
- f.write (' return (*this);\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Addition operators (with boost::operators): + += */\n')
- f.write (' inline Self&\n')
- f.write (' operator += (const Self& other)\n')
- f.write (' {\n')
- f.write (' index_ += other.get ();\n')
- f.write (' return (*this);\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief Subtraction operators (with boost::operators): - -= */\n')
- f.write (' inline Self&\n')
- f.write (' operator -= (const Self& other)\n')
- f.write (' {\n')
- f.write (' index_ -= other.get ();\n')
- f.write (' return (*this);\n')
- f.write (' }\n\n')
-
- f.write (' private:\n\n')
-
- f.write (' /** \\brief Stored index. */\n')
- f.write (' int index_;\n\n')
-
- f.write (' friend std::istream&\n')
- f.write (' operator >> (std::istream& is, pcl::geometry::' + cn + '& index);\n')
- f.write (' };\n\n')
-
- f.write (' /** \\brief ostream operator. */\n')
- f.write (' inline std::ostream&\n')
- f.write (' operator << (std::ostream& os, const pcl::geometry::' + cn + '& index)\n')
- f.write (' {\n')
- f.write (' return (os << index.get ());\n')
- f.write (' }\n\n')
-
- f.write (' /** \\brief istream operator. */\n')
- f.write (' inline std::istream&\n')
- f.write (' operator >> (std::istream& is, pcl::geometry::' + cn + '& index)\n')
- f.write (' {\n')
- f.write (' return (is >> index.index_);\n')
- f.write (' }\n\n')
-
- f.write (' } // End namespace geometry\n')
- f.write ('} // End namespace pcl\n\n')
-
-f.write ('////////////////////////////////////////////////////////////////////////////////\n')
-f.write ('// Conversions\n')
-f.write ('////////////////////////////////////////////////////////////////////////////////\n\n')
-
-f.write ('namespace pcl\n')
-f.write ('{\n')
-f.write (' namespace geometry\n')
-f.write (' {\n')
-f.write (' /** \\brief Convert the given half-edge index to an edge index. */\n')
-f.write (' inline pcl::geometry::EdgeIndex\n')
-f.write (' toEdgeIndex (const HalfEdgeIndex& index)\n')
-f.write (' {\n')
-f.write (' return (index.isValid () ? EdgeIndex (index.get () / 2) : EdgeIndex ());\n')
-f.write (' }\n\n')
-
-f.write (' /** \\brief Convert the given edge index to a half-edge index.\n')
-f.write (' * \\param[in] get_first The first half-edge of the edge is returned if this variable is true; elsewise the second.\n')
-f.write (' */\n')
-f.write (' inline pcl::geometry::HalfEdgeIndex\n')
-f.write (' toHalfEdgeIndex (const EdgeIndex& index, const bool get_first=true)\n')
-f.write (' {\n')
-f.write (' return (index.isValid () ? HalfEdgeIndex (index.get () * 2 + static_cast <int> (!get_first)) : HalfEdgeIndex ());\n')
-f.write (' }\n')
-f.write (' } // End namespace geometry\n')
-f.write ('} // End namespace pcl\n\n')
-
-f.write ('#endif // PCL_GEOMETRY_MESH_INDICES_H\n')
-
-f.close()
#pragma once
#include <fstream>
-#include <string>
-#include <sstream>
#include <iostream>
+#include <sstream>
+#include <string>
-namespace pcl
-{
- namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Read / write the half-edge mesh from / to a file.
+ * \tparam MeshT e.g. pcl::geometry::TriangleMesh or pcl::geometry::PolygonMesh
+ * \author Martin Saelzle
+ * \ingroup geometry
+ * \todo
+ * - Only writes the topology (not the mesh data).
+ * - Supports only ascii.
+ * - Does not consider the mesh traits (e.g. manifold or not)
+ */
+template <class MeshT>
+class MeshIO {
+public:
+ using Mesh = MeshT;
+
+ using Vertex = typename Mesh::Vertex;
+ using HalfEdge = typename Mesh::HalfEdge;
+ using Face = typename Mesh::Face;
+
+ using Vertices = typename Mesh::Vertices;
+ using HalfEdges = typename Mesh::HalfEdges;
+ using Faces = typename Mesh::Faces;
+
+ using VertexIndex = typename Mesh::VertexIndex;
+ using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
+ using FaceIndex = typename Mesh::FaceIndex;
+
+ /** \brief Constructor. */
+ MeshIO() {}
+
+ /** \brief Read the mesh from a file with the given filename.
+ * \param[in] filename Path to the file.
+ * \param[out] mesh The loaded mesh.
+ * \return true if success.
+ */
+ bool
+ read(const std::string& filename, Mesh& mesh) const
{
- /** \brief Read / write the half-edge mesh from / to a file.
- * \tparam MeshT e.g. pcl::geometry::TriangleMesh or pcl::geometry::PolygonMesh
- * \author Martin Saelzle
- * \ingroup geometry
- * \todo
- * - Only writes the topology (not the mesh data).
- * - Supports only ascii.
- * - Does not consider the mesh traits (e.g. manifold or not)
- */
- template <class MeshT>
- class MeshIO
+ std::ifstream file(filename.c_str());
+
+ if (!file.is_open()) {
+ std::cerr << "Error in MeshIO::read: Could not open the file '" << filename
+ << "'\n";
+ return (false);
+ }
+
+ // Read the header
+ std::string line;
+ unsigned int line_number = 1;
+ int n_v = -1, n_he = -1, n_f = -1;
+
+ if (!std::getline(file, line) || line != "PCL half-edge mesh") {
+ std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Wrong file format.\n";
+ return (false);
+ }
+ ++line_number;
+
+ if (!std::getline(file, line)) {
+      std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Number of vertices / half-edges / faces not found.\n";
+ return (false);
+ }
{
- public:
-
- using Mesh = MeshT;
-
- using Vertex = typename Mesh::Vertex;
- using HalfEdge = typename Mesh::HalfEdge;
- using Face = typename Mesh::Face;
-
- using Vertices = typename Mesh::Vertices;
- using HalfEdges = typename Mesh::HalfEdges;
- using Faces = typename Mesh::Faces;
-
- using VertexIndex = typename Mesh::VertexIndex;
- using HalfEdgeIndex = typename Mesh::HalfEdgeIndex;
- using FaceIndex = typename Mesh::FaceIndex;
-
- /** \brief Constructor. */
- MeshIO ()
- {
+ std::istringstream iss(line);
+      if (!(iss >> n_v >> n_he >> n_f) || iss.good()) // Don't allow more than 3 entries
+ {
+        std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Could not read the number of vertices / half-edges / faces.\n";
+ return (false);
+ }
+ }
+ if (n_v < 0 || n_he < 0 || n_f < 0) {
+      std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Invalid number of vertices / half-edges / faces.\n";
+ return (false);
+ }
+ ++line_number;
+
+ // Read the vertices.
+ {
+ mesh.vertices_.reserve(n_v);
+ HalfEdgeIndex idx_ohe; // Outgoing half-edge;
+
+ for (int i = 0; i < n_v; ++i, ++line_number) {
+ if (!std::getline(file, line)) {
+          std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Could not read the line.\n";
+ return (false);
}
- /** \brief Read the mesh from a file with the given filename.
- * \param[in] filename Path to the file.
- * \param[out] mesh The loaded mesh.
- * \return true if success.
- */
- bool
- read (const std::string& filename, Mesh& mesh) const
- {
- std::ifstream file (filename.c_str ());
-
- if (!file.is_open ())
- {
- std::cerr << "Error in MeshIO::read: Could not open the file '" << filename << "'\n";
- return (false);
- }
-
- // Read the header
- std::string line;
- unsigned int line_number = 1;
- int n_v = -1, n_he = -1, n_f = -1;
-
- if (!std::getline (file, line) || line != "PCL half-edge mesh")
- {
- std::cerr << "Error loading '" << filename << "' (line " << line_number << "): Wrong file format.\n";
- return (false);
- }
- ++line_number;
-
- if (!std::getline (file, line))
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Number of vertices / half-edges / faces not found.\n";
- return (false);
- }
- {
- std::istringstream iss (line);
- if (!(iss >> n_v >> n_he >> n_f) || iss.good ()) // Don't allow more than 3 en
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Could not read the number of vertices / half-edges / faces.\n";
- return (false);
- }
- }
- if (n_v < 0 || n_he < 0 || n_f < 0)
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Invalid number of vertices / half-edges / faces.\n";
- return (false);
- }
- ++line_number;
-
- // Read the vertices.
- {
- mesh.vertices_.reserve (n_v);
- HalfEdgeIndex idx_ohe; // Outgoing half-edge;
-
- for (int i=0; i<n_v; ++i, ++line_number)
- {
- if (!std::getline (file, line))
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Could not read the line.\n";
- return (false);
- }
-
- std::istringstream iss (line);
- if (!(iss >> idx_ohe) || iss.good ())
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Could not read the vertex.\n";
- return (false);
- }
- mesh.vertices_.push_back (Vertex (idx_ohe));
- }
- }
-
- // Read the half-edges.
- {
- mesh.half_edges_.reserve (n_he);
- VertexIndex idx_tv; // Terminating vertex.
- HalfEdgeIndex idx_nhe; // Next half-edge;
- HalfEdgeIndex idx_phe; // Previous half-edge.
- FaceIndex idx_f; // Face.
-
- for (int i=0; i<n_he; ++i, ++line_number)
- {
- if (!std::getline (file, line))
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Could not read the line.\n";
- return (false);
- }
-
- std::istringstream iss (line);
- if (!(iss >> idx_tv >> idx_nhe >> idx_phe >> idx_f) || iss.good ())
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Could not read the half-edge.\n";
- return (false);
- }
- mesh.half_edges_.push_back (HalfEdge (idx_tv, idx_nhe, idx_phe, idx_f));
- }
- }
-
- // Read the faces.
- {
- mesh.faces_.reserve (n_f);
- HalfEdgeIndex idx_ihe; // Inner half-edge.
-
- for (int i=0; i<n_f; ++i, ++line_number)
- {
- if (!std::getline (file, line))
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Could not read the line.\n";
- return (false);
- }
-
- std::istringstream iss (line);
- if (!(iss >> idx_ihe) || iss.good ())
- {
- std::cerr << "Error loading '" << filename << "'' (line " << line_number << "): Could not read the face.\n";
- return (false);
- }
- mesh.faces_.push_back (Face (idx_ihe));
- }
- }
-
- // Set the data
- if (Mesh::HasVertexData::value) mesh.vertex_data_cloud_. resize (n_v);
- if (Mesh::HasHalfEdgeData::value) mesh.half_edge_data_cloud_.resize (n_he);
- if (Mesh::HasEdgeData::value) mesh.edge_data_cloud_. resize (n_he / 2);
- if (Mesh::HasFaceData::value) mesh.face_data_cloud_. resize (n_f);
-
- return (true);
+ std::istringstream iss(line);
+ if (!(iss >> idx_ohe) || iss.good()) {
+          std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Could not read the vertex.\n";
+ return (false);
}
+ mesh.vertices_.push_back(Vertex(idx_ohe));
+ }
+ }
- /** \brief Write the mesh to a file with the given filename.
- * \param[in] filename Path to the file.
- * \param[in] mesh The saved mesh.
- * \return true if success
- */
- bool
- write (const std::string& filename, const Mesh& mesh) const
- {
- std::ofstream file (filename.c_str ());
-
- // Write the header
- if (!file.is_open ())
- {
- std::cerr << "Error in MeshIO::write: Could not open the file '" << filename << "'\n";
- return (false);
- }
-
- file << "PCL half-edge mesh\n";
- file << mesh.sizeVertices () << " "
- << mesh.sizeHalfEdges () << " "
- << mesh.sizeFaces () << "\n";
-
- // Write the vertices
- for (typename Vertices::const_iterator it=mesh.vertices_.begin (); it!=mesh.vertices_.end (); ++it)
- {
- file << it->idx_outgoing_half_edge_ << "\n";
- }
-
- // Write the half-edges
- for (typename HalfEdges::const_iterator it=mesh.half_edges_.begin (); it!=mesh.half_edges_.end (); ++it)
- {
- file << it->idx_terminating_vertex_ << " "
- << it->idx_next_half_edge_ << " "
- << it->idx_prev_half_edge_ << " "
- << it->idx_face_ << "\n";
- }
+ // Read the half-edges.
+ {
+ mesh.half_edges_.reserve(n_he);
+ VertexIndex idx_tv; // Terminating vertex.
+ HalfEdgeIndex idx_nhe; // Next half-edge;
+ HalfEdgeIndex idx_phe; // Previous half-edge.
+ FaceIndex idx_f; // Face.
+
+ for (int i = 0; i < n_he; ++i, ++line_number) {
+ if (!std::getline(file, line)) {
+          std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Could not read the line.\n";
+ return (false);
+ }
- // Write the faces
- for (typename Faces::const_iterator it=mesh.faces_.begin (); it!=mesh.faces_.end (); ++it)
- {
- file << it->idx_inner_half_edge_ << "\n";
- }
+ std::istringstream iss(line);
+ if (!(iss >> idx_tv >> idx_nhe >> idx_phe >> idx_f) || iss.good()) {
+          std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Could not read the half-edge.\n";
+ return (false);
+ }
+ mesh.half_edges_.push_back(HalfEdge(idx_tv, idx_nhe, idx_phe, idx_f));
+ }
+ }
- return (true);
+ // Read the faces.
+ {
+ mesh.faces_.reserve(n_f);
+ HalfEdgeIndex idx_ihe; // Inner half-edge.
+
+ for (int i = 0; i < n_f; ++i, ++line_number) {
+ if (!std::getline(file, line)) {
+          std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Could not read the line.\n";
+ return (false);
}
- };
- } // End namespace geometry
+ std::istringstream iss(line);
+ if (!(iss >> idx_ihe) || iss.good()) {
+          std::cerr << "Error loading '" << filename << "' (line " << line_number
+ << "): Could not read the face.\n";
+ return (false);
+ }
+ mesh.faces_.push_back(Face(idx_ihe));
+ }
+ }
+
+ // Set the data
+ if (Mesh::HasVertexData::value)
+ mesh.vertex_data_cloud_.resize(n_v);
+ if (Mesh::HasHalfEdgeData::value)
+ mesh.half_edge_data_cloud_.resize(n_he);
+ if (Mesh::HasEdgeData::value)
+ mesh.edge_data_cloud_.resize(n_he / 2);
+ if (Mesh::HasFaceData::value)
+ mesh.face_data_cloud_.resize(n_f);
+
+ return (true);
+ }
+
+ /** \brief Write the mesh to a file with the given filename.
+ * \param[in] filename Path to the file.
+ * \param[in] mesh The saved mesh.
+ * \return true if success
+ */
+ bool
+ write(const std::string& filename, const Mesh& mesh) const
+ {
+ std::ofstream file(filename.c_str());
+
+ // Write the header
+ if (!file.is_open()) {
+ std::cerr << "Error in MeshIO::write: Could not open the file '" << filename
+ << "'\n";
+ return (false);
+ }
+
+ file << "PCL half-edge mesh\n";
+ file << mesh.sizeVertices() << " " << mesh.sizeHalfEdges() << " "
+ << mesh.sizeFaces() << "\n";
+
+ // Write the vertices
+ for (typename Vertices::const_iterator it = mesh.vertices_.begin();
+ it != mesh.vertices_.end();
+ ++it) {
+ file << it->idx_outgoing_half_edge_ << "\n";
+ }
+
+ // Write the half-edges
+ for (typename HalfEdges::const_iterator it = mesh.half_edges_.begin();
+ it != mesh.half_edges_.end();
+ ++it) {
+ file << it->idx_terminating_vertex_ << " " << it->idx_next_half_edge_ << " "
+ << it->idx_prev_half_edge_ << " " << it->idx_face_ << "\n";
+ }
+
+ // Write the faces
+ for (typename Faces::const_iterator it = mesh.faces_.begin();
+ it != mesh.faces_.end();
+ ++it) {
+ file << it->idx_inner_half_edge_ << "\n";
+ }
+
+ return (true);
+ }
+};
+
+} // End namespace geometry
} // End namespace pcl
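
// A minimal round-trip sketch for the MeshIO class above. The header paths
// <pcl/geometry/mesh_io.h>, <pcl/geometry/mesh_traits.h> and
// <pcl/geometry/polygon_mesh.h> are assumptions; only the read()/write()
// interface and the mesh methods they use are relied on.
#include <pcl/geometry/mesh_io.h>
#include <pcl/geometry/mesh_traits.h>
#include <pcl/geometry/polygon_mesh.h>

#include <iostream>

int main()
{
  using Mesh = pcl::geometry::PolygonMesh<pcl::geometry::DefaultMeshTraits<>>;

  Mesh mesh;
  const auto v0 = mesh.addVertex();
  const auto v1 = mesh.addVertex();
  const auto v2 = mesh.addVertex();
  mesh.addFace(v0, v1, v2);

  // write() stores only the topology, in the ASCII layout parsed by read():
  //   PCL half-edge mesh
  //   <n_vertices> <n_half_edges> <n_faces>
  //   ... one vertex / half-edge / face record per line ...
  pcl::geometry::MeshIO<Mesh> io;
  if (!io.write("mesh.txt", mesh))
    return 1;

  Mesh loaded;
  if (!io.read("mesh.txt", loaded))
    return 1;

  std::cout << loaded.sizeVertices() << " vertices, " << loaded.sizeHalfEdges()
            << " half-edges, " << loaded.sizeFaces() << " faces\n";
  return 0;
}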
#include <type_traits>
-namespace pcl
-{
- namespace geometry
- {
- /** \brief No data is associated with the vertices / half-edges / edges / faces. */
- struct NoData {};
+namespace pcl {
+namespace geometry {
+/** \brief No data is associated with the vertices / half-edges / edges / faces. */
+struct NoData {};
- /** \brief The mesh traits are used to set up compile time settings for the mesh.
- * \tparam VertexDataT Data stored for each vertex. Defaults to pcl::NoData.
- * \tparam HalfEdgeDataT Data stored for each half-edge. Defaults to pcl::NoData.
- * \tparam EdgeDataT Data stored for each edge. Defaults to pcl::NoData.
- * \tparam FaceDataT Data stored for each face. Defaults to pcl::NoData.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class VertexDataT = pcl::geometry::NoData,
- class HalfEdgeDataT = pcl::geometry::NoData,
- class EdgeDataT = pcl::geometry::NoData,
- class FaceDataT = pcl::geometry::NoData>
- struct DefaultMeshTraits
- {
- using VertexData = VertexDataT;
- using HalfEdgeData = HalfEdgeDataT;
- using EdgeData = EdgeDataT;
- using FaceData = FaceDataT;
+/** \brief The mesh traits are used to set up compile time settings for the mesh.
+ * \tparam VertexDataT Data stored for each vertex. Defaults to pcl::NoData.
+ * \tparam HalfEdgeDataT Data stored for each half-edge. Defaults to pcl::NoData.
+ * \tparam EdgeDataT Data stored for each edge. Defaults to pcl::NoData.
+ * \tparam FaceDataT Data stored for each face. Defaults to pcl::NoData.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class VertexDataT = pcl::geometry::NoData,
+ class HalfEdgeDataT = pcl::geometry::NoData,
+ class EdgeDataT = pcl::geometry::NoData,
+ class FaceDataT = pcl::geometry::NoData>
+struct DefaultMeshTraits {
+ using VertexData = VertexDataT;
+ using HalfEdgeData = HalfEdgeDataT;
+ using EdgeData = EdgeDataT;
+ using FaceData = FaceDataT;
- /** \brief Specifies whether the mesh is manifold or not (only non-manifold vertices can be represented). */
- using IsManifold = std::false_type;
- };
- } // End namespace geometry
+ /** \brief Specifies whether the mesh is manifold or not (only non-manifold vertices
+ * can be represented). */
+ using IsManifold = std::false_type;
+};
+} // End namespace geometry
} // End namespace pcl
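
// A short sketch of how the traits above select the per-element data clouds at
// compile time. The header paths <pcl/geometry/mesh_traits.h> and
// <pcl/geometry/polygon_mesh.h> are assumptions.
#include <pcl/geometry/mesh_traits.h>
#include <pcl/geometry/polygon_mesh.h>
#include <pcl/point_types.h>

// Store one PointXYZ per vertex; half-edges, edges and faces keep NoData.
using XYZTraits = pcl::geometry::DefaultMeshTraits<pcl::PointXYZ>;
using XYZMesh = pcl::geometry::PolygonMesh<XYZTraits>;

int main()
{
  static_assert(XYZMesh::HasVertexData::value,
                "vertex data enabled because VertexDataT != pcl::geometry::NoData");
  static_assert(!XYZMesh::HasFaceData::value,
                "face data disabled: FaceDataT defaults to pcl::geometry::NoData");

  XYZMesh mesh;
  mesh.addVertex(pcl::PointXYZ(0.f, 0.f, 0.f));
  mesh.addVertex(pcl::PointXYZ(1.f, 0.f, 0.f));
  mesh.addVertex(pcl::PointXYZ(0.f, 1.f, 0.f));
  return 0;
}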
#pragma once
-namespace pcl
-{
-/** \brief base class for iterators on 2-dimensional maps like images/organized clouds etc.
- * \author Suat Gedikli <gedikli@willowgarage.com>
- * \ingroup geometry
- */
-class OrganizedIndexIterator
-{
- public:
- /** \brief constructor
- * \param[in] width the width of the image/organized cloud
- */
- OrganizedIndexIterator (unsigned width);
-
- /** \brief virtual destructor*/
- virtual ~OrganizedIndexIterator ();
-
- /** \brief go to next pixel/point in image/cloud*/
- virtual void operator ++ () = 0;
-
- /** \brief go to next pixel/point in image/cloud*/
- virtual void operator ++ (int);
-
- /** \brief returns the pixel/point index in the linearized memory of the image/cloud
- * \return the pixel/point index in the linearized memory of the image/cloud
- */
- unsigned operator* () const;
-
- /** \brief returns the pixel/point index in the linearized memory of the image/cloud
- * \return the pixel/point index in the linearized memory of the image/cloud
- */
- virtual unsigned getIndex () const;
-
- /** \brief returns the row index (y-coordinate) of the current pixel/point
- * \return the row index (y-coordinate) of the current pixel/point
- */
- virtual unsigned getRowIndex () const;
-
- /** \brief returns the col index (x-coordinate) of the current pixel/point
- * \return the col index (x-coordinate) of the current pixel/point
- */
- virtual unsigned getColumnIndex () const;
-
- /** \brief return whether the current visited pixel/point is valid or not.
- * \return true if the current pixel/point is within the points to be iterated over, false otherwise
- */
- virtual bool isValid () const = 0;
-
- /** \brief resets the iterator to the beginning of the line
- */
- virtual void reset () = 0;
-
- protected:
- /** \brief the width of the image/cloud*/
- unsigned width_;
-
- /** \brief the index of the current pixel/point*/
- unsigned index_;
+namespace pcl {
+/** \brief base class for iterators on 2-dimensional maps like images/organized clouds
+ * etc.
+ * \author Suat Gedikli <gedikli@willowgarage.com>
+ * \ingroup geometry
+ */
+class OrganizedIndexIterator {
+public:
+ /**
+ * \brief constructor
+ * \param[in] width the width of the image/organized cloud
+ */
+ OrganizedIndexIterator(unsigned width);
+
+ /** \brief virtual destructor*/
+ virtual ~OrganizedIndexIterator();
+
+ /** \brief go to next pixel/point in image/cloud*/
+ virtual void
+ operator++() = 0;
+
+ /** \brief go to next pixel/point in image/cloud*/
+ virtual void
+ operator++(int);
+
+ /** \brief returns the pixel/point index in the linearized memory of the image/cloud
+ * \return the pixel/point index in the linearized memory of the image/cloud
+ */
+ unsigned
+ operator*() const;
+
+ /** \brief returns the pixel/point index in the linearized memory of the image/cloud
+ * \return the pixel/point index in the linearized memory of the image/cloud
+ */
+ virtual unsigned
+ getIndex() const;
+
+ /** \brief returns the row index (y-coordinate) of the current pixel/point
+ * \return the row index (y-coordinate) of the current pixel/point
+ */
+ virtual unsigned
+ getRowIndex() const;
+
+ /** \brief returns the col index (x-coordinate) of the current pixel/point
+ * \return the col index (x-coordinate) of the current pixel/point
+ */
+ virtual unsigned
+ getColumnIndex() const;
+
+ /** \brief return whether the current visited pixel/point is valid or not.
+ * \return true if the current pixel/point is within the points to be iterated over,
+ * false otherwise
+ */
+ virtual bool
+ isValid() const = 0;
+
+ /** \brief resets the iterator to the beginning of the line
+ */
+ virtual void
+ reset() = 0;
+
+protected:
+ /** \brief the width of the image/cloud*/
+ unsigned width_;
+
+ /** \brief the index of the current pixel/point*/
+ unsigned index_;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
-inline OrganizedIndexIterator::OrganizedIndexIterator (unsigned width)
-: width_ (width)
-, index_ (0)
-{
-}
+inline OrganizedIndexIterator::OrganizedIndexIterator(unsigned width)
+: width_(width), index_(0)
+{}
////////////////////////////////////////////////////////////////////////////////
-inline OrganizedIndexIterator::~OrganizedIndexIterator ()
-{
-}
+inline OrganizedIndexIterator::~OrganizedIndexIterator() {}
////////////////////////////////////////////////////////////////////////////////
inline void
-OrganizedIndexIterator::operator++ (int)
+OrganizedIndexIterator::operator++(int)
{
- return operator ++();
+ return operator++();
}
////////////////////////////////////////////////////////////////////////////////
inline unsigned
-pcl::OrganizedIndexIterator::operator * () const
+pcl::OrganizedIndexIterator::operator*() const
{
return index_;
}
////////////////////////////////////////////////////////////////////////////////
inline unsigned
-pcl::OrganizedIndexIterator::getIndex () const
+pcl::OrganizedIndexIterator::getIndex() const
{
return index_;
}
////////////////////////////////////////////////////////////////////////////////
-/** \brief default implementation. Should be overloaded
+/** \brief default implementation. Should be overloaded
*/
inline unsigned
-pcl::OrganizedIndexIterator::getRowIndex () const
+pcl::OrganizedIndexIterator::getRowIndex() const
{
return index_ / width_;
}
////////////////////////////////////////////////////////////////////////////////
inline unsigned
-pcl::OrganizedIndexIterator::getColumnIndex () const
+pcl::OrganizedIndexIterator::getColumnIndex() const
{
return index_ % width_;
}
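
// A minimal concrete iterator built on the abstract base above (the header path
// <pcl/geometry/organized_index_iterator.h> is an assumption). It visits every
// pixel of a width x height image and reuses the base-class decomposition
// row = index / width, column = index % width.
#include <pcl/geometry/organized_index_iterator.h>

#include <iostream>

class FullImageIterator : public pcl::OrganizedIndexIterator {
public:
  FullImageIterator(unsigned width, unsigned height)
  : OrganizedIndexIterator(width), size_(width * height)
  {}

  void operator++() override { ++index_; }                  // advance the linear index
  bool isValid() const override { return index_ < size_; }
  void reset() override { index_ = 0; }

private:
  unsigned size_;
};

int main()
{
  for (FullImageIterator it(4, 2); it.isValid(); ++it)
    std::cout << *it << " -> row " << it.getRowIndex() << ", col "
              << it.getColumnIndex() << '\n';
  return 0;
}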
#pragma once
-#include <pcl/common/eigen.h>
+#include <pcl/ModelCoefficients.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <pcl/ModelCoefficients.h>
-namespace pcl
-{
- /** \brief PlanarPolygon represents a planar (2D) polygon, potentially in a 3D space.
- * \author Alex Trevor
- */
- template <typename PointT>
- class PlanarPolygon
+namespace pcl {
+/** \brief PlanarPolygon represents a planar (2D) polygon, potentially in a 3D space.
+ * \author Alex Trevor
+ */
+template <typename PointT>
+class PlanarPolygon {
+public:
+ using Ptr = shared_ptr<PlanarPolygon<PointT>>;
+ using ConstPtr = shared_ptr<const PlanarPolygon<PointT>>;
+
+ /** \brief Empty constructor for PlanarPolygon */
+ PlanarPolygon() : contour_() {}
+
+ /** \brief Constructor for PlanarPolygon
+ * \param[in] contour a vector of points bounding the polygon
+ * \param[in] coefficients a vector of the plane's coefficients (a,b,c,d)
+ */
+ PlanarPolygon(typename pcl::PointCloud<PointT>::VectorType& contour,
+ Eigen::Vector4f& coefficients)
+ : contour_(contour), coefficients_(coefficients)
+ {}
+
+ /** \brief Destructor. */
+ virtual ~PlanarPolygon() {}
+
+ /** \brief Set the internal contour
+ * \param[in] contour the new planar polygonal contour
+ */
+ void
+ setContour(const pcl::PointCloud<PointT>& contour)
+ {
+ contour_ = contour.points;
+ }
+
+ /** \brief Getter for the contour / boundary */
+ typename pcl::PointCloud<PointT>::VectorType&
+ getContour()
+ {
+ return (contour_);
+ }
+
+ /** \brief Getter for the contour / boundary */
+ const typename pcl::PointCloud<PointT>::VectorType&
+ getContour() const
{
- public:
- using Ptr = shared_ptr<PlanarPolygon<PointT> >;
- using ConstPtr = shared_ptr<const PlanarPolygon<PointT> >;
-
- /** \brief Empty constructor for PlanarPolygon */
- PlanarPolygon () : contour_ ()
- {}
-
- /** \brief Constructor for PlanarPolygon
- * \param[in] contour a vector of points bounding the polygon
- * \param[in] coefficients a vector of the plane's coefficients (a,b,c,d)
- */
- PlanarPolygon (typename pcl::PointCloud<PointT>::VectorType &contour,
- Eigen::Vector4f& coefficients)
- : contour_ (contour), coefficients_ (coefficients)
- {}
-
- /** \brief Destructor. */
- virtual ~PlanarPolygon () {}
-
- /** \brief Set the internal contour
- * \param[in] contour the new planar polygonal contour
- */
- void
- setContour (const pcl::PointCloud<PointT> &contour)
- {
- contour_ = contour.points;
- }
-
- /** \brief Getter for the contour / boundary */
- typename pcl::PointCloud<PointT>::VectorType&
- getContour ()
- {
- return (contour_);
- }
-
- /** \brief Getter for the contour / boundary */
- const typename pcl::PointCloud<PointT>::VectorType&
- getContour () const
- {
- return (contour_);
- }
-
- /** \brief Setr the internal coefficients
- * \param[in] coefficients the new coefficients to be set
- */
- void
- setCoefficients (const Eigen::Vector4f &coefficients)
- {
- coefficients_ = coefficients;
- }
-
- /** \brief Set the internal coefficients
- * \param[in] coefficients the new coefficients to be set
- */
- void
- setCoefficients (const pcl::ModelCoefficients &coefficients)
- {
- for (int i = 0; i < 4; i++)
- coefficients_[i] = coefficients.values.at (i);
- }
-
- /** \brief Getter for the coefficients */
- Eigen::Vector4f&
- getCoefficients ()
- {
- return (coefficients_);
- }
-
- /** \brief Getter for the coefficients */
- const Eigen::Vector4f&
- getCoefficients () const
- {
- return (coefficients_);
- }
-
- protected:
- /** \brief A list of points on the boundary/contour of the planar region. */
- typename pcl::PointCloud<PointT>::VectorType contour_;
-
- /** \brief A list of model coefficients (a,b,c,d). */
- Eigen::Vector4f coefficients_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
-}
+ return (contour_);
+ }
+
+  /** \brief Set the internal coefficients
+ * \param[in] coefficients the new coefficients to be set
+ */
+ void
+ setCoefficients(const Eigen::Vector4f& coefficients)
+ {
+ coefficients_ = coefficients;
+ }
+
+ /** \brief Set the internal coefficients
+ * \param[in] coefficients the new coefficients to be set
+ */
+ void
+ setCoefficients(const pcl::ModelCoefficients& coefficients)
+ {
+ for (int i = 0; i < 4; i++)
+ coefficients_[i] = coefficients.values.at(i);
+ }
+
+ /** \brief Getter for the coefficients */
+ Eigen::Vector4f&
+ getCoefficients()
+ {
+ return (coefficients_);
+ }
+
+ /** \brief Getter for the coefficients */
+ const Eigen::Vector4f&
+ getCoefficients() const
+ {
+ return (coefficients_);
+ }
+
+protected:
+ /** \brief A list of points on the boundary/contour of the planar region. */
+ typename pcl::PointCloud<PointT>::VectorType contour_;
+
+ /** \brief A list of model coefficients (a,b,c,d). */
+ Eigen::Vector4f coefficients_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace pcl
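
// A minimal usage sketch for PlanarPolygon, assuming the header is installed as
// <pcl/geometry/planar_polygon.h> (the "planar_polygon.h" included below).
#include <pcl/geometry/planar_polygon.h>
#include <pcl/point_types.h>

#include <iostream>

int main()
{
  pcl::PointCloud<pcl::PointXYZ> contour;
  contour.push_back(pcl::PointXYZ(0.f, 0.f, 0.f));
  contour.push_back(pcl::PointXYZ(1.f, 0.f, 0.f));
  contour.push_back(pcl::PointXYZ(1.f, 1.f, 0.f));
  contour.push_back(pcl::PointXYZ(0.f, 1.f, 0.f));

  pcl::PlanarPolygon<pcl::PointXYZ> polygon;
  polygon.setContour(contour);                                   // copies contour.points
  polygon.setCoefficients(Eigen::Vector4f(0.f, 0.f, 1.f, 0.f));  // plane z = 0 as (a,b,c,d)

  std::cout << polygon.getContour().size() << " boundary points, coefficients "
            << polygon.getCoefficients().transpose() << '\n';
  return 0;
}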
#pragma once
+#include <pcl/geometry/mesh_base.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/geometry/mesh_base.h>
-namespace pcl
-{
- namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Tag describing the type of the mesh. */
+struct PolygonMeshTag {};
+
+/** \brief General half-edge mesh that can store any polygon with at least three
+ * vertices.
+ * \tparam MeshTraitsT Please have a look at
+ * pcl::geometry::DefaultMeshTraits.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshTraitsT>
+class PolygonMesh : public pcl::geometry::MeshBase<PolygonMesh<MeshTraitsT>,
+ MeshTraitsT,
+ PolygonMeshTag> {
+public:
+ using Base =
+ pcl::geometry::MeshBase<PolygonMesh<MeshTraitsT>, MeshTraitsT, PolygonMeshTag>;
+
+ using Self = PolygonMesh<MeshTraitsT>;
+ using Ptr = shared_ptr<Self>;
+ using ConstPtr = shared_ptr<const Self>;
+
+ using VertexData = typename Base::VertexData;
+ using HalfEdgeData = typename Base::HalfEdgeData;
+ using EdgeData = typename Base::EdgeData;
+ using FaceData = typename Base::FaceData;
+ using IsManifold = typename Base::IsManifold;
+ using MeshTag = typename Base::MeshTag;
+
+ using HasVertexData = typename Base::HasVertexData;
+ using HasHalfEdgeData = typename Base::HasHalfEdgeData;
+ using HasEdgeData = typename Base::HasEdgeData;
+ using HasFaceData = typename Base::HasFaceData;
+
+ using VertexDataCloud = typename Base::VertexDataCloud;
+ using HalfEdgeDataCloud = typename Base::HalfEdgeDataCloud;
+ using EdgeDataCloud = typename Base::EdgeDataCloud;
+ using FaceDataCloud = typename Base::FaceDataCloud;
+
+ // Indices
+ using VertexIndex = typename Base::VertexIndex;
+ using HalfEdgeIndex = typename Base::HalfEdgeIndex;
+ using EdgeIndex = typename Base::EdgeIndex;
+ using FaceIndex = typename Base::FaceIndex;
+
+ using VertexIndices = typename Base::VertexIndices;
+ using HalfEdgeIndices = typename Base::HalfEdgeIndices;
+ using EdgeIndices = typename Base::EdgeIndices;
+ using FaceIndices = typename Base::FaceIndices;
+
+ // Circulators
+ using VertexAroundVertexCirculator = typename Base::VertexAroundVertexCirculator;
+ using OutgoingHalfEdgeAroundVertexCirculator =
+ typename Base::OutgoingHalfEdgeAroundVertexCirculator;
+ using IncomingHalfEdgeAroundVertexCirculator =
+ typename Base::IncomingHalfEdgeAroundVertexCirculator;
+ using FaceAroundVertexCirculator = typename Base::FaceAroundVertexCirculator;
+ using VertexAroundFaceCirculator = typename Base::VertexAroundFaceCirculator;
+ using InnerHalfEdgeAroundFaceCirculator =
+ typename Base::InnerHalfEdgeAroundFaceCirculator;
+ using OuterHalfEdgeAroundFaceCirculator =
+ typename Base::OuterHalfEdgeAroundFaceCirculator;
+ using FaceAroundFaceCirculator = typename Base::FaceAroundFaceCirculator;
+
+ /** \brief Constructor. */
+ PolygonMesh() : Base(), add_triangle_(3), add_quad_(4) {}
+
+ /** \brief The base method of addFace is hidden because of the overloads in this
+ * class. */
+ using Base::addFace;
+
+ /** \brief Add a triangle to the mesh. Data is only added if it is associated with the
+ * elements. The last vertex is connected with the first one.
+ * \param[in] idx_v_0 Index
+ * to the first vertex.
+ * \param[in] idx_v_1 Index to the second vertex.
+ * \param[in] idx_v_2 Index to the third vertex.
+ * \param[in] face_data Data that is set for the face.
+ * \param[in] half_edge_data Data that is set for all added half-edges.
+ * \param[in] edge_data Data that is set for all added edges.
+ * \return Index to the new face. Failure is signaled by returning an invalid face
+ * index.
+ * \warning The vertices must be valid and unique (each vertex may be contained
+ * only once). Not complying with this requirement results in undefined behavior!
+ */
+ inline FaceIndex
+ addFace(const VertexIndex& idx_v_0,
+ const VertexIndex& idx_v_1,
+ const VertexIndex& idx_v_2,
+ const FaceData& face_data = FaceData(),
+ const EdgeData& edge_data = EdgeData(),
+ const HalfEdgeData& half_edge_data = HalfEdgeData())
+ {
+ add_triangle_[0] = idx_v_0;
+ add_triangle_[1] = idx_v_1;
+ add_triangle_[2] = idx_v_2;
+
+ return (this->addFaceImplBase(add_triangle_, face_data, edge_data, half_edge_data));
+ }
+
+ /** \brief Add a quad to the mesh. Data is only added if it is associated with the
+ * elements. The last vertex is connected with the first one.
+ * \param[in] idx_v_0 Index to the first vertex.
+ * \param[in] idx_v_1 Index to the second vertex.
+ * \param[in] idx_v_2 Index to the third vertex.
+ * \param[in] idx_v_3 Index to the fourth vertex.
+ * \param[in] face_data Data that is set for the face.
+ * \param[in] half_edge_data Data that is set for all added half-edges.
+ * \param[in] edge_data Data that is set for all added edges.
+ * \return Index to the new face. Failure is signaled by returning an invalid face
+ * index.
+ * \warning The vertices must be valid and unique (each vertex may be contained
+ * only once). Not complying with this requirement results in undefined behavior!
+ */
+ inline FaceIndex
+ addFace(const VertexIndex& idx_v_0,
+ const VertexIndex& idx_v_1,
+ const VertexIndex& idx_v_2,
+ const VertexIndex& idx_v_3,
+ const FaceData& face_data = FaceData(),
+ const EdgeData& edge_data = EdgeData(),
+ const HalfEdgeData& half_edge_data = HalfEdgeData())
+ {
+ add_quad_[0] = idx_v_0;
+ add_quad_[1] = idx_v_1;
+ add_quad_[2] = idx_v_2;
+ add_quad_[3] = idx_v_3;
+
+ return (this->addFaceImplBase(add_quad_, face_data, edge_data, half_edge_data));
+ }
+
+private:
+ // NOTE: Can't use the typedef of Base as a friend.
+ friend class pcl::geometry::
+ MeshBase<PolygonMesh<MeshTraitsT>, MeshTraitsT, pcl::geometry::PolygonMeshTag>;
+
+ /** \brief addFace for the polygon mesh. */
+ inline FaceIndex
+ addFaceImpl(const VertexIndices& vertices,
+ const FaceData& face_data,
+ const EdgeData& edge_data,
+ const HalfEdgeData& half_edge_data)
{
- /** \brief Tag describing the type of the mesh. */
- struct PolygonMeshTag {};
-
- /** \brief General half-edge mesh that can store any polygon with a minimum number of vertices of 3.
- * \tparam MeshTraitsT Please have a look at pcl::geometry::DefaultMeshTraits.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshTraitsT>
- class PolygonMesh : public pcl::geometry::MeshBase <PolygonMesh <MeshTraitsT>, MeshTraitsT, PolygonMeshTag>
- {
- public:
-
- using Base = pcl::geometry::MeshBase <PolygonMesh <MeshTraitsT>, MeshTraitsT, PolygonMeshTag>;
-
- using Self = PolygonMesh<MeshTraitsT>;
- using Ptr = shared_ptr<Self>;
- using ConstPtr = shared_ptr<const Self>;
-
- using VertexData = typename Base::VertexData;
- using HalfEdgeData = typename Base::HalfEdgeData;
- using EdgeData = typename Base::EdgeData;
- using FaceData = typename Base::FaceData;
- using IsManifold = typename Base::IsManifold;
- using MeshTag = typename Base::MeshTag;
-
- using HasVertexData = typename Base::HasVertexData;
- using HasHalfEdgeData = typename Base::HasHalfEdgeData;
- using HasEdgeData = typename Base::HasEdgeData;
- using HasFaceData = typename Base::HasFaceData;
-
- using VertexDataCloud = typename Base::VertexDataCloud;
- using HalfEdgeDataCloud = typename Base::HalfEdgeDataCloud;
- using EdgeDataCloud = typename Base::EdgeDataCloud;
- using FaceDataCloud = typename Base::FaceDataCloud;
-
- // Indices
- using VertexIndex = typename Base::VertexIndex;
- using HalfEdgeIndex = typename Base::HalfEdgeIndex;
- using EdgeIndex = typename Base::EdgeIndex;
- using FaceIndex = typename Base::FaceIndex;
-
- using VertexIndices = typename Base::VertexIndices;
- using HalfEdgeIndices = typename Base::HalfEdgeIndices;
- using EdgeIndices = typename Base::EdgeIndices;
- using FaceIndices = typename Base::FaceIndices;
-
- // Circulators
- using VertexAroundVertexCirculator = typename Base::VertexAroundVertexCirculator;
- using OutgoingHalfEdgeAroundVertexCirculator = typename Base::OutgoingHalfEdgeAroundVertexCirculator;
- using IncomingHalfEdgeAroundVertexCirculator = typename Base::IncomingHalfEdgeAroundVertexCirculator;
- using FaceAroundVertexCirculator = typename Base::FaceAroundVertexCirculator;
- using VertexAroundFaceCirculator = typename Base::VertexAroundFaceCirculator;
- using InnerHalfEdgeAroundFaceCirculator = typename Base::InnerHalfEdgeAroundFaceCirculator;
- using OuterHalfEdgeAroundFaceCirculator = typename Base::OuterHalfEdgeAroundFaceCirculator;
- using FaceAroundFaceCirculator = typename Base::FaceAroundFaceCirculator;
-
- /** \brief Constructor. */
- PolygonMesh ()
- : Base (),
- add_triangle_ (3),
- add_quad_ (4)
- {
- }
-
- /** \brief The base method of addFace is hidden because of the overloads in this class. */
- using Base::addFace;
-
- /** \brief Add a triangle to the mesh. Data is only added if it is associated with the elements. The last vertex is connected with the first one.
- * \param[in] idx_v_0 Index to the first vertex.
- * \param[in] idx_v_1 Index to the second vertex.
- * \param[in] idx_v_2 Index to the third vertex.
- * \param[in] face_data Data that is set for the face.
- * \param[in] half_edge_data Data that is set for all added half-edges.
- * \param[in] edge_data Data that is set for all added edges.
- * \return Index to the new face. Failure is signaled by returning an invalid face index.
- * \warning The vertices must be valid and unique (each vertex may be contained only once). Not complying with this requirement results in undefined behavior!
- */
- inline FaceIndex
- addFace (const VertexIndex& idx_v_0,
- const VertexIndex& idx_v_1,
- const VertexIndex& idx_v_2,
- const FaceData& face_data = FaceData (),
- const EdgeData& edge_data = EdgeData (),
- const HalfEdgeData& half_edge_data = HalfEdgeData ())
- {
- add_triangle_ [0] = idx_v_0;
- add_triangle_ [1] = idx_v_1;
- add_triangle_ [2] = idx_v_2;
-
- return (this->addFaceImplBase (add_triangle_, face_data, edge_data, half_edge_data));
- }
-
- /** \brief Add a quad to the mesh. Data is only added if it is associated with the elements. The last vertex is connected with the first one.
- * \param[in] idx_v_0 Index to the first vertex.
- * \param[in] idx_v_1 Index to the second vertex.
- * \param[in] idx_v_2 Index to the third vertex.
- * \param[in] idx_v_3 Index to the fourth vertex.
- * \param[in] face_data Data that is set for the face.
- * \param[in] half_edge_data Data that is set for all added half-edges.
- * \param[in] edge_data Data that is set for all added edges.
- * \return Index to the new face. Failure is signaled by returning an invalid face index.
- * \warning The vertices must be valid and unique (each vertex may be contained only once). Not complying with this requirement results in undefined behavior!
- */
- inline FaceIndex
- addFace (const VertexIndex& idx_v_0,
- const VertexIndex& idx_v_1,
- const VertexIndex& idx_v_2,
- const VertexIndex& idx_v_3,
- const FaceData& face_data = FaceData (),
- const EdgeData& edge_data = EdgeData (),
- const HalfEdgeData& half_edge_data = HalfEdgeData ())
- {
- add_quad_ [0] = idx_v_0;
- add_quad_ [1] = idx_v_1;
- add_quad_ [2] = idx_v_2;
- add_quad_ [3] = idx_v_3;
-
- return (this->addFaceImplBase (add_quad_, face_data, edge_data, half_edge_data));
- }
-
- private:
-
- // NOTE: Can't use the typedef of Base as a friend.
- friend class pcl::geometry::MeshBase <PolygonMesh <MeshTraitsT>, MeshTraitsT, pcl::geometry::PolygonMeshTag>;
-
- /** \brief addFace for the polygon mesh. */
- inline FaceIndex
- addFaceImpl (const VertexIndices& vertices,
- const FaceData& face_data,
- const EdgeData& edge_data,
- const HalfEdgeData& half_edge_data)
- {
- return (this->addFaceImplBase (vertices, face_data, edge_data, half_edge_data));
- }
-
- ////////////////////////////////////////////////////////////////////////
- // Members
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Storage for adding a triangle. */
- VertexIndices add_triangle_;
-
- /** \brief Storage for adding a quad. */
- VertexIndices add_quad_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
- } // End namespace geom
+ return (this->addFaceImplBase(vertices, face_data, edge_data, half_edge_data));
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // Members
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Storage for adding a triangle. */
+ VertexIndices add_triangle_;
+
+ /** \brief Storage for adding a quad. */
+ VertexIndices add_quad_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace geometry
} // End namespace pcl
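
// A minimal usage sketch for the addFace() overloads above, assuming the header
// path <pcl/geometry/polygon_mesh.h>. Vertices have to be added first, and each
// face must reference valid, pairwise distinct vertices.
#include <pcl/geometry/polygon_mesh.h>

#include <iostream>

int main()
{
  using Mesh = pcl::geometry::PolygonMesh<pcl::geometry::DefaultMeshTraits<>>;

  Mesh mesh;
  const Mesh::VertexIndex v0 = mesh.addVertex();
  const Mesh::VertexIndex v1 = mesh.addVertex();
  const Mesh::VertexIndex v2 = mesh.addVertex();
  const Mesh::VertexIndex v3 = mesh.addVertex();
  const Mesh::VertexIndex v4 = mesh.addVertex();

  // A quad and a triangle sharing the edge between v1 and v2 (traversed in
  // opposite directions, so each half-edge carries at most one face).
  const Mesh::FaceIndex quad = mesh.addFace(v0, v1, v2, v3);
  const Mesh::FaceIndex tri = mesh.addFace(v2, v1, v4);

  std::cout << std::boolalpha << quad.isValid() << ' ' << tri.isValid() << ": "
            << mesh.sizeFaces() << " faces, " << mesh.sizeHalfEdges()
            << " half-edges\n";
  return 0;
}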
#pragma once
-#include "planar_polygon.h"
#include <pcl/point_cloud.h>
-namespace pcl
-{
- /** \brief see approximatePolygon2D
- * \author Suat Gedikli <gedikli@willowgarage.com>
- */
- template <typename PointT>
- void approximatePolygon (const PlanarPolygon<PointT>& polygon, PlanarPolygon<PointT>& approx_polygon, float threshold, bool refine = false, bool closed = true);
-
- /** \brief returns an approximate polygon to given 2D contour. Uses just X and Y values.
- * \note if refinement is not turned on, the resulting polygon will contain points from the original contour with their original z values (if any)
- * \note if refinement is turned on, the z values of the refined polygon are not valid and should be set to 0 if point contains z attribute.
- * \param [in] polygon input polygon
- * \param [out] approx_polygon approximate polygon
- * \param [in] threshold maximum allowed distance of an input vertex to an output edge
- * \param refine
- * \param [in] closed whether it is a closed polygon or a polyline
- * \author Suat Gedikli <gedikli@willowgarage.com>
- */
- template <typename PointT>
- void approximatePolygon2D (const typename PointCloud<PointT>::VectorType &polygon,
- typename PointCloud<PointT>::VectorType &approx_polygon,
- float threshold, bool refine = false, bool closed = true);
+#include "planar_polygon.h"
+
+namespace pcl {
+/** \brief see approximatePolygon2D
+ * \author Suat Gedikli <gedikli@willowgarage.com>
+ */
+template <typename PointT>
+void
+approximatePolygon(const PlanarPolygon<PointT>& polygon,
+ PlanarPolygon<PointT>& approx_polygon,
+ float threshold,
+ bool refine = false,
+ bool closed = true);
+
+/** \brief returns an approximate polygon for the given 2D contour. Uses just X and Y values.
+ * \note if refinement is not turned on, the resulting polygon will contain points from
+ * the original contour with their original z values (if any)
+ * \note if refinement is turned on, the z values of the refined polygon are not valid
+ * and should be set to 0 if the point type contains a z attribute.
+ * \param [in] polygon input polygon
+ * \param [out] approx_polygon approximate polygon
+ * \param [in] threshold maximum allowed distance of an input vertex to an output edge
+ * \param [in] refine whether the output polygon should be refined (see the notes above)
+ * \param [in] closed whether it is a closed polygon or a polyline
+ * \author Suat Gedikli <gedikli@willowgarage.com>
+ */
+template <typename PointT>
+void
+approximatePolygon2D(const typename PointCloud<PointT>::VectorType& polygon,
+ typename PointCloud<PointT>::VectorType& approx_polygon,
+ float threshold,
+ bool refine = false,
+ bool closed = true);
} // namespace pcl
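// Illustrative usage (editor's sketch, not part of this patch): simplifying a
// planar contour with approximatePolygon2D as declared above. The contour
// contents and the 2 cm threshold are assumptions made purely for illustration.
//
//   pcl::PointCloud<pcl::PointXYZ>::VectorType contour;  // filled elsewhere
//   pcl::PointCloud<pcl::PointXYZ>::VectorType approx;
//   // every input vertex stays within 2 cm of an output edge; closed polygon
//   pcl::approximatePolygon2D<pcl::PointXYZ>(contour, approx, 0.02f,
//                                            false /*refine*/, true /*closed*/);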
#pragma once
+#include <pcl/geometry/mesh_base.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/geometry/mesh_base.h>
-namespace pcl
-{
- namespace geometry
+namespace pcl {
+namespace geometry {
+/** \brief Tag describing the type of the mesh. */
+struct QuadMeshTag {};
+
+/** \brief Half-edge mesh that can only store quads.
+ * \tparam MeshTraitsT Please have a look at pcl::geometry::DefaultMeshTraits.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshTraitsT>
+class QuadMesh
+: public pcl::geometry::MeshBase<QuadMesh<MeshTraitsT>, MeshTraitsT, QuadMeshTag> {
+public:
+ using Base = pcl::geometry::MeshBase<QuadMesh<MeshTraitsT>, MeshTraitsT, QuadMeshTag>;
+
+ using Self = QuadMesh<MeshTraitsT>;
+ using Ptr = shared_ptr<Self>;
+ using ConstPtr = shared_ptr<const Self>;
+
+ using VertexData = typename Base::VertexData;
+ using HalfEdgeData = typename Base::HalfEdgeData;
+ using EdgeData = typename Base::EdgeData;
+ using FaceData = typename Base::FaceData;
+ using IsManifold = typename Base::IsManifold;
+ using MeshTag = typename Base::MeshTag;
+
+ using HasVertexData = typename Base::HasVertexData;
+ using HasHalfEdgeData = typename Base::HasHalfEdgeData;
+ using HasEdgeData = typename Base::HasEdgeData;
+ using HasFaceData = typename Base::HasFaceData;
+
+ using VertexDataCloud = typename Base::VertexDataCloud;
+ using HalfEdgeDataCloud = typename Base::HalfEdgeDataCloud;
+ using EdgeDataCloud = typename Base::EdgeDataCloud;
+ using FaceDataCloud = typename Base::FaceDataCloud;
+
+ // Indices
+ using VertexIndex = typename Base::VertexIndex;
+ using HalfEdgeIndex = typename Base::HalfEdgeIndex;
+ using EdgeIndex = typename Base::EdgeIndex;
+ using FaceIndex = typename Base::FaceIndex;
+
+ using VertexIndices = typename Base::VertexIndices;
+ using HalfEdgeIndices = typename Base::HalfEdgeIndices;
+ using EdgeIndices = typename Base::EdgeIndices;
+ using FaceIndices = typename Base::FaceIndices;
+
+ // Circulators
+ using VertexAroundVertexCirculator = typename Base::VertexAroundVertexCirculator;
+ using OutgoingHalfEdgeAroundVertexCirculator =
+ typename Base::OutgoingHalfEdgeAroundVertexCirculator;
+ using IncomingHalfEdgeAroundVertexCirculator =
+ typename Base::IncomingHalfEdgeAroundVertexCirculator;
+ using FaceAroundVertexCirculator = typename Base::FaceAroundVertexCirculator;
+ using VertexAroundFaceCirculator = typename Base::VertexAroundFaceCirculator;
+ using InnerHalfEdgeAroundFaceCirculator =
+ typename Base::InnerHalfEdgeAroundFaceCirculator;
+ using OuterHalfEdgeAroundFaceCirculator =
+ typename Base::OuterHalfEdgeAroundFaceCirculator;
+ using FaceAroundFaceCirculator = typename Base::FaceAroundFaceCirculator;
+
+ /** \brief Constructor. */
+ QuadMesh() : Base(), add_quad_(4) {}
+
+ /** \brief The base method of addFace is hidden because of the overloads in this
+ * class. */
+ using Base::addFace;
+
+ /** \brief Add a quad to the mesh. Data is only added if it is associated with the
+ * elements. The last vertex is connected with the first one.
+ * \param[in] idx_v_0 Index to the first vertex.
+ * \param[in] idx_v_1 Index to the second vertex.
+ * \param[in] idx_v_2 Index to the third vertex.
+ * \param[in] idx_v_3 Index to the fourth vertex.
+ * \param[in] face_data Data that is set for the face.
+ * \param[in] half_edge_data Data that is set for all added half-edges.
+ * \param[in] edge_data Data that is set for all added edges.
+ * \return Index to the new face. Failure is signaled by returning an invalid face
+ * index.
+ * \warning The vertices must be valid and unique (each vertex may be contained
+ * only once). Not complying with this requirement results in undefined behavior!
+ */
+ inline FaceIndex
+ addFace(const VertexIndex& idx_v_0,
+ const VertexIndex& idx_v_1,
+ const VertexIndex& idx_v_2,
+ const VertexIndex& idx_v_3,
+ const FaceData& face_data = FaceData(),
+ const EdgeData& edge_data = EdgeData(),
+ const HalfEdgeData& half_edge_data = HalfEdgeData())
+ {
+ add_quad_[0] = idx_v_0;
+ add_quad_[1] = idx_v_1;
+ add_quad_[2] = idx_v_2;
+ add_quad_[3] = idx_v_3;
+
+ return (this->addFaceImplBase(add_quad_, face_data, edge_data, half_edge_data));
+ }
+
+private:
+ // NOTE: Can't use the typedef of Base as a friend.
+ friend class pcl::geometry::
+ MeshBase<QuadMesh<MeshTraitsT>, MeshTraitsT, pcl::geometry::QuadMeshTag>;
+
+ /** \brief addFace for the quad mesh. */
+ inline FaceIndex
+ addFaceImpl(const VertexIndices& vertices,
+ const FaceData& face_data,
+ const EdgeData& edge_data,
+ const HalfEdgeData& half_edge_data)
{
- /** \brief Tag describing the type of the mesh. */
- struct QuadMeshTag {};
-
- /** \brief Half-edge mesh that can only store quads.
- * \tparam MeshTraitsT Please have a look at pcl::geometry::DefaultMeshTraits.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshTraitsT>
- class QuadMesh : public pcl::geometry::MeshBase <QuadMesh <MeshTraitsT>, MeshTraitsT, QuadMeshTag>
- {
- public:
-
- using Base = pcl::geometry::MeshBase <QuadMesh <MeshTraitsT>, MeshTraitsT, QuadMeshTag>;
-
- using Self = QuadMesh<MeshTraitsT>;
- using Ptr = shared_ptr<Self>;
- using ConstPtr = shared_ptr<const Self>;
-
- using VertexData = typename Base::VertexData;
- using HalfEdgeData = typename Base::HalfEdgeData;
- using EdgeData = typename Base::EdgeData;
- using FaceData = typename Base::FaceData;
- using IsManifold = typename Base::IsManifold;
- using MeshTag = typename Base::MeshTag;
-
- using HasVertexData = typename Base::HasVertexData;
- using HasHalfEdgeData = typename Base::HasHalfEdgeData;
- using HasEdgeData = typename Base::HasEdgeData;
- using HasFaceData = typename Base::HasFaceData;
-
- using VertexDataCloud = typename Base::VertexDataCloud;
- using HalfEdgeDataCloud = typename Base::HalfEdgeDataCloud;
- using EdgeDataCloud = typename Base::EdgeDataCloud;
- using FaceDataCloud = typename Base::FaceDataCloud;
-
- // Indices
- using VertexIndex = typename Base::VertexIndex;
- using HalfEdgeIndex = typename Base::HalfEdgeIndex;
- using EdgeIndex = typename Base::EdgeIndex;
- using FaceIndex = typename Base::FaceIndex;
-
- using VertexIndices = typename Base::VertexIndices;
- using HalfEdgeIndices = typename Base::HalfEdgeIndices;
- using EdgeIndices = typename Base::EdgeIndices;
- using FaceIndices = typename Base::FaceIndices;
-
- // Circulators
- using VertexAroundVertexCirculator = typename Base::VertexAroundVertexCirculator;
- using OutgoingHalfEdgeAroundVertexCirculator = typename Base::OutgoingHalfEdgeAroundVertexCirculator;
- using IncomingHalfEdgeAroundVertexCirculator = typename Base::IncomingHalfEdgeAroundVertexCirculator;
- using FaceAroundVertexCirculator = typename Base::FaceAroundVertexCirculator;
- using VertexAroundFaceCirculator = typename Base::VertexAroundFaceCirculator;
- using InnerHalfEdgeAroundFaceCirculator = typename Base::InnerHalfEdgeAroundFaceCirculator;
- using OuterHalfEdgeAroundFaceCirculator = typename Base::OuterHalfEdgeAroundFaceCirculator;
- using FaceAroundFaceCirculator = typename Base::FaceAroundFaceCirculator;
-
- /** \brief Constructor. */
- QuadMesh ()
- : Base (),
- add_quad_ (4)
- {
- }
-
- /** \brief The base method of addFace is hidden because of the overloads in this class. */
- using Base::addFace;
-
- /** \brief Add a quad to the mesh. Data is only added if it is associated with the elements. The last vertex is connected with the first one.
- * \param[in] idx_v_0 Index to the first vertex.
- * \param[in] idx_v_1 Index to the second vertex.
- * \param[in] idx_v_2 Index to the third vertex.
- * \param[in] idx_v_3 Index to the fourth vertex.
- * \param[in] face_data Data that is set for the face.
- * \param[in] half_edge_data Data that is set for all added half-edges.
- * \param[in] edge_data Data that is set for all added edges.
- * \return Index to the new face. Failure is signaled by returning an invalid face index.
- * \warning The vertices must be valid and unique (each vertex may be contained only once). Not complying with this requirement results in undefined behavior!
- */
- inline FaceIndex
- addFace (const VertexIndex& idx_v_0,
- const VertexIndex& idx_v_1,
- const VertexIndex& idx_v_2,
- const VertexIndex& idx_v_3,
- const FaceData& face_data = FaceData (),
- const EdgeData& edge_data = EdgeData (),
- const HalfEdgeData& half_edge_data = HalfEdgeData ())
- {
- add_quad_ [0] = idx_v_0;
- add_quad_ [1] = idx_v_1;
- add_quad_ [2] = idx_v_2;
- add_quad_ [3] = idx_v_3;
-
- return (this->addFaceImplBase (add_quad_, face_data, edge_data, half_edge_data));
- }
-
- private:
-
- // NOTE: Can't use the typedef of Base as a friend.
- friend class pcl::geometry::MeshBase <QuadMesh <MeshTraitsT>, MeshTraitsT, pcl::geometry::QuadMeshTag>;
-
- /** \brief addFace for the quad mesh. */
- inline FaceIndex
- addFaceImpl (const VertexIndices& vertices,
- const FaceData& face_data,
- const EdgeData& edge_data,
- const HalfEdgeData& half_edge_data)
- {
- if (vertices.size () == 4)
- return (this->addFaceImplBase (vertices, face_data, edge_data, half_edge_data));
- return (FaceIndex ());
- }
-
- ////////////////////////////////////////////////////////////////////////
- // Members
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Storage for adding a quad. */
- VertexIndices add_quad_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
- } // End namespace geom
+ if (vertices.size() == 4)
+ return (this->addFaceImplBase(vertices, face_data, edge_data, half_edge_data));
+ return (FaceIndex());
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // Members
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Storage for adding a quad. */
+ VertexIndices add_quad_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace geometry
} // End namespace pcl
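// Illustrative usage (editor's sketch, not part of this patch): building a
// minimal quad mesh with the class above. The DefaultMeshTraits<> alias and
// the use of MeshBase::addVertex() are assumptions made for illustration only.
//
//   using Mesh = pcl::geometry::QuadMesh<pcl::geometry::DefaultMeshTraits<>>;
//   Mesh mesh;
//   Mesh::VertexIndices v;
//   for (int i = 0; i < 4; ++i)
//     v.push_back(mesh.addVertex());
//   // Connects v[3] back to v[0]; an invalid index signals failure.
//   const Mesh::FaceIndex face = mesh.addFace(v[0], v[1], v[2], v[3]);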
#pragma once
-#include <utility>
-
+#include <pcl/geometry/mesh_base.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/geometry/mesh_base.h>
-namespace pcl
-{
- namespace geometry
+#include <utility>
+
+namespace pcl {
+namespace geometry {
+/** \brief Tag describing the type of the mesh. */
+struct TriangleMeshTag {};
+
+/** \brief Half-edge mesh that can only store triangles.
+ * \tparam MeshTraitsT Please have a look at pcl::geometry::DefaultMeshTraits.
+ * \author Martin Saelzle
+ * \ingroup geometry
+ */
+template <class MeshTraitsT>
+class TriangleMesh : public pcl::geometry::MeshBase<TriangleMesh<MeshTraitsT>,
+ MeshTraitsT,
+ TriangleMeshTag> {
+public:
+ using Base =
+ pcl::geometry::MeshBase<TriangleMesh<MeshTraitsT>, MeshTraitsT, TriangleMeshTag>;
+
+ using Self = TriangleMesh<MeshTraitsT>;
+ using Ptr = shared_ptr<Self>;
+ using ConstPtr = shared_ptr<const Self>;
+
+ using VertexData = typename Base::VertexData;
+ using HalfEdgeData = typename Base::HalfEdgeData;
+ using EdgeData = typename Base::EdgeData;
+ using FaceData = typename Base::FaceData;
+ using IsManifold = typename Base::IsManifold;
+ using MeshTag = typename Base::MeshTag;
+
+ using HasVertexData = typename Base::HasVertexData;
+ using HasHalfEdgeData = typename Base::HasHalfEdgeData;
+ using HasEdgeData = typename Base::HasEdgeData;
+ using HasFaceData = typename Base::HasFaceData;
+
+ using VertexDataCloud = typename Base::VertexDataCloud;
+ using HalfEdgeDataCloud = typename Base::HalfEdgeDataCloud;
+ using EdgeDataCloud = typename Base::EdgeDataCloud;
+ using FaceDataCloud = typename Base::FaceDataCloud;
+
+ // Indices
+ using VertexIndex = typename Base::VertexIndex;
+ using HalfEdgeIndex = typename Base::HalfEdgeIndex;
+ using EdgeIndex = typename Base::EdgeIndex;
+ using FaceIndex = typename Base::FaceIndex;
+ using FaceIndexPair = std::pair<FaceIndex, FaceIndex>;
+
+ using VertexIndices = typename Base::VertexIndices;
+ using HalfEdgeIndices = typename Base::HalfEdgeIndices;
+ using EdgeIndices = typename Base::EdgeIndices;
+ using FaceIndices = typename Base::FaceIndices;
+
+ // Circulators
+ using VertexAroundVertexCirculator = typename Base::VertexAroundVertexCirculator;
+ using OutgoingHalfEdgeAroundVertexCirculator =
+ typename Base::OutgoingHalfEdgeAroundVertexCirculator;
+ using IncomingHalfEdgeAroundVertexCirculator =
+ typename Base::IncomingHalfEdgeAroundVertexCirculator;
+ using FaceAroundVertexCirculator = typename Base::FaceAroundVertexCirculator;
+ using VertexAroundFaceCirculator = typename Base::VertexAroundFaceCirculator;
+ using InnerHalfEdgeAroundFaceCirculator =
+ typename Base::InnerHalfEdgeAroundFaceCirculator;
+ using OuterHalfEdgeAroundFaceCirculator =
+ typename Base::OuterHalfEdgeAroundFaceCirculator;
+ using FaceAroundFaceCirculator = typename Base::FaceAroundFaceCirculator;
+
+ /** \brief Constructor. */
+ TriangleMesh() : Base(), add_triangle_(3), inner_he_atp_(4), is_new_atp_(4) {}
+
+ /** \brief The base method of addFace is hidden because of the overloads in this
+ * class. */
+ using Base::addFace;
+
+ /** \brief Add a triangle to the mesh. Data is only added if it is associated with the
+ * elements. The last vertex is connected with the first one.
+ * \param[in] idx_v_0 Index to the first vertex.
+ * \param[in] idx_v_1 Index to the second vertex.
+ * \param[in] idx_v_2 Index to the third vertex.
+ * \param[in] face_data Data that is set for the face.
+ * \param[in] half_edge_data Data that is set for all added half-edges.
+ * \param[in] edge_data Data that is set for all added edges.
+ * \return Index to the new face. Failure is signaled by returning an invalid face
+ * index.
+ * \warning The vertices must be valid and unique (each vertex may be contained
+ * only once). Not complying with this requirement results in undefined behavior!
+ */
+ inline FaceIndex
+ addFace(const VertexIndex& idx_v_0,
+ const VertexIndex& idx_v_1,
+ const VertexIndex& idx_v_2,
+ const FaceData& face_data = FaceData(),
+ const EdgeData& edge_data = EdgeData(),
+ const HalfEdgeData& half_edge_data = HalfEdgeData())
+ {
+ add_triangle_[0] = idx_v_0;
+ add_triangle_[1] = idx_v_1;
+ add_triangle_[2] = idx_v_2;
+
+ return (this->addFaceImplBase(add_triangle_, face_data, edge_data, half_edge_data));
+ }
+
+ /** \brief Add two triangles for the four given input vertices. When using a manifold
+ * triangle mesh it is not possible to connect two bounded regions without going
+ * through a non-manifold intermediate step. This method first tries to add the
+ * triangles individually and if this fails connects the whole configuration at once
+ * (if possible).
+ * \param[in] vertices Indices to the vertices of the new face. (The size
+ * must be equal to four).
+ * \param[in] face_data Data that is set for the face.
+ * \param[in] half_edge_data Data that is set for all added half-edges.
+ * \param[in] edge_data Data that is set for all added edges.
+ * \return Pair of face indices. The first index is valid if one triangle was added.
+ * Both indices are valid if two triangles were added.
+ * \warning The vertices must be valid and unique (each vertex may be contained only
+ * once). Not complying with this requirement results in undefined behavior!
+ */
+ FaceIndexPair
+ addTrianglePair(const VertexIndices& vertices,
+ const FaceData& face_data = FaceData(),
+ const EdgeData& edge_data = EdgeData(),
+ const HalfEdgeData& half_edge_data = HalfEdgeData())
+ {
+ if (vertices.size() != 4) {
+ return (std::make_pair(FaceIndex(), FaceIndex()));
+ }
+ return (this->addTrianglePair(vertices[0],
+ vertices[1],
+ vertices[2],
+ vertices[3],
+ face_data,
+ edge_data,
+ half_edge_data));
+ }
+
+ /** \brief Add two triangles for the four given input vertices. When using a manifold
+ * triangle mesh it is not possible to connect two bounded regions without going
+ * through a non-manifold intermediate step. This method first tries to add the
+ * triangles individually and if this fails connects the whole configuration at once
+ * (if possible).
+ * \param[in] idx_v_0 Index to the first vertex.
+ * \param[in] idx_v_1 Index to the second vertex.
+ * \param[in] idx_v_2 Index to the third vertex.
+ * \param[in] idx_v_3 Index to the fourth vertex.
+ * \param[in] face_data Data that is set for the face.
+ * \param[in] half_edge_data Data that is set for all added half-edges.
+ * \param[in] edge_data Data that is set for all added edges.
+ * \return Pair of face indices. The first index is valid if one triangle was added.
+ * Both indices are valid if two triangles were added.
+ * \warning The vertices must be valid and unique (each vertex may be contained only
+ * once). Not complying with this requirement results in undefined behavior!
+ */
+ inline FaceIndexPair
+ addTrianglePair(const VertexIndex& idx_v_0,
+ const VertexIndex& idx_v_1,
+ const VertexIndex& idx_v_2,
+ const VertexIndex& idx_v_3,
+ const FaceData& face_data = FaceData(),
+ const EdgeData& edge_data = EdgeData(),
+ const HalfEdgeData& half_edge_data = HalfEdgeData())
+ {
+ // Try to add two faces
+ // 3 - 2
+ // | / |
+ // 0 - 1
+ FaceIndex idx_face_0 = this->addFace(idx_v_0, idx_v_1, idx_v_2, face_data);
+ FaceIndex idx_face_1 = this->addFace(idx_v_0, idx_v_2, idx_v_3, face_data);
+
+ if (idx_face_0.isValid()) {
+ return (std::make_pair(idx_face_0, idx_face_1));
+ }
+ if (idx_face_1.isValid()) {
+ idx_face_0 = this->addFace(
+ idx_v_0, idx_v_1, idx_v_2, face_data); // might be possible to add now
+ return (std::make_pair(idx_face_1, idx_face_0));
+ }
+
+ // Try to add two faces
+ // 3 - 2
+ // | \ |
+ // 0 - 1
+ idx_face_0 = this->addFace(idx_v_1, idx_v_2, idx_v_3, face_data);
+ idx_face_1 = this->addFace(idx_v_0, idx_v_1, idx_v_3, face_data);
+
+ if (idx_face_0.isValid()) {
+ return (std::make_pair(idx_face_0, idx_face_1));
+ }
+ if (idx_face_1.isValid()) {
+ idx_face_0 = this->addFace(
+ idx_v_1, idx_v_2, idx_v_3, face_data); // might be possible to add now
+ return (std::make_pair(idx_face_1, idx_face_0));
+ }
+
+ if (!IsManifold::value) {
+ return (std::make_pair(FaceIndex(), FaceIndex()));
+ }
+
+ // Check manifoldness
+ if (!Base::checkTopology1(
+ idx_v_0, idx_v_1, inner_he_atp_[0], is_new_atp_[0], IsManifold()) ||
+ !Base::checkTopology1(
+ idx_v_1, idx_v_2, inner_he_atp_[1], is_new_atp_[1], IsManifold()) ||
+ !Base::checkTopology1(
+ idx_v_2, idx_v_3, inner_he_atp_[2], is_new_atp_[2], IsManifold()) ||
+ !Base::checkTopology1(
+ idx_v_3, idx_v_0, inner_he_atp_[3], is_new_atp_[3], IsManifold())) {
+ return (std::make_pair(FaceIndex(), FaceIndex()));
+ }
+
+ // Connect the triangle pair
+ if (!is_new_atp_[0] && is_new_atp_[1] && !is_new_atp_[2] && is_new_atp_[3]) {
+ return (this->connectTrianglePair(inner_he_atp_[0],
+ inner_he_atp_[2],
+ idx_v_0,
+ idx_v_1,
+ idx_v_2,
+ idx_v_3,
+ face_data,
+ edge_data,
+ half_edge_data));
+ }
+ if (is_new_atp_[0] && !is_new_atp_[1] && is_new_atp_[2] && !is_new_atp_[3]) {
+ return (this->connectTrianglePair(inner_he_atp_[1],
+ inner_he_atp_[3],
+ idx_v_1,
+ idx_v_2,
+ idx_v_3,
+ idx_v_0,
+ face_data,
+ edge_data,
+ half_edge_data));
+ }
+ return (std::make_pair(FaceIndex(), FaceIndex()));
+ }
+
+private:
+ // NOTE: Can't use the typedef of Base as a friend.
+ friend class pcl::geometry::
+ MeshBase<TriangleMesh<MeshTraitsT>, MeshTraitsT, pcl::geometry::TriangleMeshTag>;
+
+ /** \brief addFace for the triangular mesh. */
+ inline FaceIndex
+ addFaceImpl(const VertexIndices& vertices,
+ const FaceData& face_data,
+ const EdgeData& edge_data,
+ const HalfEdgeData& half_edge_data)
+ {
+ if (vertices.size() == 3)
+ return (this->addFaceImplBase(vertices, face_data, edge_data, half_edge_data));
+ return (FaceIndex());
+ }
+
+ /** \brief Connect the triangles a-b-c and a-c-d. The edges a-b and c-d must be old
+ * and the edges b-c and d-a must be new. */
+ // d - c
+ // | / |
+ // a - b
+ FaceIndexPair
+ connectTrianglePair(const HalfEdgeIndex& idx_he_ab,
+ const HalfEdgeIndex& idx_he_cd,
+ const VertexIndex& idx_v_a,
+ const VertexIndex& idx_v_b,
+ const VertexIndex& idx_v_c,
+ const VertexIndex& idx_v_d,
+ const FaceData& face_data,
+ const EdgeData& edge_data,
+ const HalfEdgeData& he_data)
{
- /** \brief Tag describing the type of the mesh. */
- struct TriangleMeshTag {};
-
- /** \brief Half-edge mesh that can only store triangles.
- * \tparam MeshTraitsT Please have a look at pcl::geometry::DefaultMeshTraits.
- * \author Martin Saelzle
- * \ingroup geometry
- */
- template <class MeshTraitsT>
- class TriangleMesh : public pcl::geometry::MeshBase <TriangleMesh <MeshTraitsT>, MeshTraitsT, TriangleMeshTag>
- {
- public:
-
- using Base = pcl::geometry::MeshBase <TriangleMesh <MeshTraitsT>, MeshTraitsT, TriangleMeshTag>;
-
- using Self = TriangleMesh<MeshTraitsT>;
- using Ptr = shared_ptr<Self>;
- using ConstPtr = shared_ptr<const Self>;
-
- using VertexData = typename Base::VertexData;
- using HalfEdgeData = typename Base::HalfEdgeData;
- using EdgeData = typename Base::EdgeData;
- using FaceData = typename Base::FaceData;
- using IsManifold = typename Base::IsManifold;
- using MeshTag = typename Base::MeshTag;
-
- using HasVertexData = typename Base::HasVertexData;
- using HasHalfEdgeData = typename Base::HasHalfEdgeData;
- using HasEdgeData = typename Base::HasEdgeData;
- using HasFaceData = typename Base::HasFaceData;
-
- using VertexDataCloud = typename Base::VertexDataCloud;
- using HalfEdgeDataCloud = typename Base::HalfEdgeDataCloud;
- using EdgeDataCloud = typename Base::EdgeDataCloud;
- using FaceDataCloud = typename Base::FaceDataCloud;
-
- // Indices
- using VertexIndex = typename Base::VertexIndex;
- using HalfEdgeIndex = typename Base::HalfEdgeIndex;
- using EdgeIndex = typename Base::EdgeIndex;
- using FaceIndex = typename Base::FaceIndex;
- using FaceIndexPair = std::pair <FaceIndex, FaceIndex>;
-
- using VertexIndices = typename Base::VertexIndices;
- using HalfEdgeIndices = typename Base::HalfEdgeIndices;
- using EdgeIndices = typename Base::EdgeIndices;
- using FaceIndices = typename Base::FaceIndices;
-
- // Circulators
- using VertexAroundVertexCirculator = typename Base::VertexAroundVertexCirculator;
- using OutgoingHalfEdgeAroundVertexCirculator = typename Base::OutgoingHalfEdgeAroundVertexCirculator;
- using IncomingHalfEdgeAroundVertexCirculator = typename Base::IncomingHalfEdgeAroundVertexCirculator;
- using FaceAroundVertexCirculator = typename Base::FaceAroundVertexCirculator;
- using VertexAroundFaceCirculator = typename Base::VertexAroundFaceCirculator;
- using InnerHalfEdgeAroundFaceCirculator = typename Base::InnerHalfEdgeAroundFaceCirculator;
- using OuterHalfEdgeAroundFaceCirculator = typename Base::OuterHalfEdgeAroundFaceCirculator;
- using FaceAroundFaceCirculator = typename Base::FaceAroundFaceCirculator;
-
- /** \brief Constructor. */
- TriangleMesh ()
- : Base (),
- add_triangle_ (3),
- inner_he_atp_ (4),
- is_new_atp_ (4)
- {
- }
-
- /** \brief The base method of addFace is hidden because of the overloads in this class. */
- using Base::addFace;
-
- /** \brief Add a triangle to the mesh. Data is only added if it is associated with the elements. The last vertex is connected with the first one.
- * \param[in] idx_v_0 Index to the first vertex.
- * \param[in] idx_v_1 Index to the second vertex.
- * \param[in] idx_v_2 Index to the third vertex.
- * \param[in] face_data Data that is set for the face.
- * \param[in] half_edge_data Data that is set for all added half-edges.
- * \param[in] edge_data Data that is set for all added edges.
- * \return Index to the new face. Failure is signaled by returning an invalid face index.
- * \warning The vertices must be valid and unique (each vertex may be contained only once). Not complying with this requirement results in undefined behavior!
- */
- inline FaceIndex
- addFace (const VertexIndex& idx_v_0,
- const VertexIndex& idx_v_1,
- const VertexIndex& idx_v_2,
- const FaceData& face_data = FaceData (),
- const EdgeData& edge_data = EdgeData (),
- const HalfEdgeData& half_edge_data = HalfEdgeData ())
- {
- add_triangle_ [0] = idx_v_0;
- add_triangle_ [1] = idx_v_1;
- add_triangle_ [2] = idx_v_2;
-
- return (this->addFaceImplBase (add_triangle_, face_data, edge_data, half_edge_data));
- }
-
- /** \brief Add two triangles for the four given input vertices. When using a manifold triangle mesh it is not possible to connect two bounded regions without going through a non-manifold intermediate step. This method first tries to add the triangles individually and if this fails connects the whole configuration at once (if possible).
- * \param[in] vertices Indices to the vertices of the new face. (The size must be equal to four).
- * \param[in] face_data Data that is set for the face.
- * \param[in] half_edge_data Data that is set for all added half-edges.
- * \param[in] edge_data Data that is set for all added edges.
- * \return Pair of face indices. The first index is valid if one triangle was added. Both indices are valid if two triangles were added.
- * \warning The vertices must be valid and unique (each vertex may be contained only once). Not complying with this requirement results in undefined behavior!
- */
- FaceIndexPair
- addTrianglePair (const VertexIndices& vertices,
- const FaceData& face_data = FaceData (),
- const EdgeData& edge_data = EdgeData (),
- const HalfEdgeData& half_edge_data = HalfEdgeData ())
- {
- if (vertices.size () != 4)
- {
- return (std::make_pair (FaceIndex (), FaceIndex ()));
- }
- return (this->addTrianglePair (vertices [0], vertices [1], vertices [2], vertices [3], face_data, edge_data, half_edge_data));
- }
-
- /** \brief Add two triangles for the four given input vertices. When using a manifold triangle mesh it is not possible to connect two bounded regions without going through a non-manifold intermediate step. This method first tries to add the triangles individually and if this fails connects the whole configuration at once (if possible).
- * \param[in] idx_v_0 Index to the first vertex.
- * \param[in] idx_v_1 Index to the second vertex.
- * \param[in] idx_v_2 Index to the third vertex.
- * \param[in] idx_v_3 Index to the fourth vertex.
- * \param[in] face_data Data that is set for the face.
- * \param[in] half_edge_data Data that is set for all added half-edges.
- * \param[in] edge_data Data that is set for all added edges.
- * \return Pair of face indices. The first index is valid if one triangle was added. Both indices are valid if two triangles were added.
- * \warning The vertices must be valid and unique (each vertex may be contained only once). Not complying with this requirement results in undefined behavior!
- */
- inline FaceIndexPair
- addTrianglePair (const VertexIndex& idx_v_0,
- const VertexIndex& idx_v_1,
- const VertexIndex& idx_v_2,
- const VertexIndex& idx_v_3,
- const FaceData& face_data = FaceData (),
- const EdgeData& edge_data = EdgeData (),
- const HalfEdgeData& half_edge_data = HalfEdgeData ())
- {
- // Try to add two faces
- // 3 - 2
- // | / |
- // 0 - 1
- FaceIndex idx_face_0 = this->addFace (idx_v_0, idx_v_1, idx_v_2, face_data);
- FaceIndex idx_face_1 = this->addFace (idx_v_0, idx_v_2, idx_v_3, face_data);
-
- if (idx_face_0.isValid ())
- {
- return (std::make_pair (idx_face_0, idx_face_1));
- }
- if (idx_face_1.isValid ())
- {
- idx_face_0 = this->addFace (idx_v_0, idx_v_1, idx_v_2, face_data); // might be possible to add now
- return (std::make_pair (idx_face_1, idx_face_0));
- }
-
- // Try to add two faces
- // 3 - 2
- // | \ |
- // 0 - 1
- idx_face_0 = this->addFace (idx_v_1, idx_v_2, idx_v_3, face_data);
- idx_face_1 = this->addFace (idx_v_0, idx_v_1, idx_v_3, face_data);
-
- if (idx_face_0.isValid ())
- {
- return (std::make_pair (idx_face_0, idx_face_1));
- }
- if (idx_face_1.isValid ())
- {
- idx_face_0 = this->addFace (idx_v_1, idx_v_2, idx_v_3, face_data); // might be possible to add now
- return (std::make_pair (idx_face_1, idx_face_0));
- }
-
- if (!IsManifold::value)
- {
- return (std::make_pair (FaceIndex (), FaceIndex ()));
- }
-
- // Check manifoldness
- if (!Base::checkTopology1 (idx_v_0,idx_v_1, inner_he_atp_ [0], is_new_atp_ [0], IsManifold ()) ||
- !Base::checkTopology1 (idx_v_1,idx_v_2, inner_he_atp_ [1], is_new_atp_ [1], IsManifold ()) ||
- !Base::checkTopology1 (idx_v_2,idx_v_3, inner_he_atp_ [2], is_new_atp_ [2], IsManifold ()) ||
- !Base::checkTopology1 (idx_v_3,idx_v_0, inner_he_atp_ [3], is_new_atp_ [3], IsManifold ()))
- {
- return (std::make_pair (FaceIndex (), FaceIndex ()));
- }
-
- // Connect the triangle pair
- if (!is_new_atp_ [0] && is_new_atp_ [1] && !is_new_atp_ [2] && is_new_atp_ [3])
- {
- return (this->connectTrianglePair (inner_he_atp_ [0], inner_he_atp_ [2], idx_v_0, idx_v_1, idx_v_2, idx_v_3, face_data, edge_data, half_edge_data));
- }
- if (is_new_atp_ [0] && !is_new_atp_ [1] && is_new_atp_ [2] && !is_new_atp_ [3])
- {
- return (this->connectTrianglePair (inner_he_atp_ [1], inner_he_atp_ [3], idx_v_1, idx_v_2, idx_v_3, idx_v_0, face_data, edge_data, half_edge_data));
- }
- return (std::make_pair (FaceIndex (), FaceIndex ()));
- }
-
- private:
-
- // NOTE: Can't use the typedef of Base as a friend.
- friend class pcl::geometry::MeshBase <TriangleMesh <MeshTraitsT>, MeshTraitsT, pcl::geometry::TriangleMeshTag>;
-
- /** \brief addFace for the triangular mesh. */
- inline FaceIndex
- addFaceImpl (const VertexIndices& vertices,
- const FaceData& face_data,
- const EdgeData& edge_data,
- const HalfEdgeData& half_edge_data)
- {
- if (vertices.size () == 3)
- return (this->addFaceImplBase (vertices, face_data, edge_data, half_edge_data));
- return (FaceIndex ());
- }
-
- /** \brief Connect the triangles a-b-c and a-c-d. The edges a-b and c-d must be old and the edges b-c and d-a must be new. */
- // d - c
- // | / |
- // a - b
- FaceIndexPair
- connectTrianglePair (const HalfEdgeIndex& idx_he_ab,
- const HalfEdgeIndex& idx_he_cd,
- const VertexIndex& idx_v_a,
- const VertexIndex& idx_v_b,
- const VertexIndex& idx_v_c,
- const VertexIndex& idx_v_d,
- const FaceData& face_data,
- const EdgeData& edge_data,
- const HalfEdgeData& he_data)
- {
- // Add new half-edges
- const HalfEdgeIndex idx_he_bc = Base::addEdge (idx_v_b, idx_v_c, he_data, edge_data);
- const HalfEdgeIndex idx_he_da = Base::addEdge (idx_v_d, idx_v_a, he_data, edge_data);
- const HalfEdgeIndex idx_he_ca = Base::addEdge (idx_v_c, idx_v_a, he_data, edge_data);
-
- const HalfEdgeIndex idx_he_cb = Base::getOppositeHalfEdgeIndex (idx_he_bc);
- const HalfEdgeIndex idx_he_ad = Base::getOppositeHalfEdgeIndex (idx_he_da);
- const HalfEdgeIndex idx_he_ac = Base::getOppositeHalfEdgeIndex (idx_he_ca);
-
- // Get the existing half-edges
- const HalfEdgeIndex idx_he_ab_prev = Base::getPrevHalfEdgeIndex (idx_he_ab); // No reference!
- const HalfEdgeIndex idx_he_ab_next = Base::getNextHalfEdgeIndex (idx_he_ab); // No reference!
-
- const HalfEdgeIndex idx_he_cd_prev = Base::getPrevHalfEdgeIndex (idx_he_cd); // No reference!
- const HalfEdgeIndex idx_he_cd_next = Base::getNextHalfEdgeIndex (idx_he_cd); // No reference!
-
- // Connect the outer half-edges
- Base::connectPrevNext (idx_he_ab_prev, idx_he_ad );
- Base::connectPrevNext (idx_he_ad , idx_he_cd_next);
- Base::connectPrevNext (idx_he_cd_prev, idx_he_cb );
- Base::connectPrevNext (idx_he_cb , idx_he_ab_next);
-
- // Connect the inner half-edges
- Base::connectPrevNext (idx_he_ab, idx_he_bc);
- Base::connectPrevNext (idx_he_bc, idx_he_ca);
- Base::connectPrevNext (idx_he_ca, idx_he_ab);
-
- Base::connectPrevNext (idx_he_ac, idx_he_cd);
- Base::connectPrevNext (idx_he_cd, idx_he_da);
- Base::connectPrevNext (idx_he_da, idx_he_ac);
-
- // Connect the vertices to the boundary half-edges
- Base::setOutgoingHalfEdgeIndex (idx_v_a, idx_he_ad );
- Base::setOutgoingHalfEdgeIndex (idx_v_b, idx_he_ab_next);
- Base::setOutgoingHalfEdgeIndex (idx_v_c, idx_he_cb );
- Base::setOutgoingHalfEdgeIndex (idx_v_d, idx_he_cd_next);
-
- // Add and connect the faces
- HalfEdgeIndices inner_he_abc; inner_he_abc.reserve (3);
- inner_he_abc.push_back (idx_he_ab);
- inner_he_abc.push_back (idx_he_bc);
- inner_he_abc.push_back (idx_he_ca);
-
- HalfEdgeIndices inner_he_acd; inner_he_acd.reserve (3);
- inner_he_acd.push_back (idx_he_ac);
- inner_he_acd.push_back (idx_he_cd);
- inner_he_acd.push_back (idx_he_da);
-
- const FaceIndex idx_f_abc = Base::connectFace (inner_he_abc, face_data);
- const FaceIndex idx_f_acd = Base::connectFace (inner_he_acd, face_data);
-
- return (std::make_pair (idx_f_abc, idx_f_acd));
- }
-
- ////////////////////////////////////////////////////////////////////////
- // Members
- ////////////////////////////////////////////////////////////////////////
-
- /** \brief Storage for adding a triangle. */
- VertexIndices add_triangle_;
-
- /** \brief Storage for addTrianglePair. */
- HalfEdgeIndices inner_he_atp_;
-
- /** \brief Storage for addTrianglePair. */
- std::vector <bool> is_new_atp_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
- } // End namespace geom
+ // Add new half-edges
+ const HalfEdgeIndex idx_he_bc = Base::addEdge(idx_v_b, idx_v_c, he_data, edge_data);
+ const HalfEdgeIndex idx_he_da = Base::addEdge(idx_v_d, idx_v_a, he_data, edge_data);
+ const HalfEdgeIndex idx_he_ca = Base::addEdge(idx_v_c, idx_v_a, he_data, edge_data);
+
+ const HalfEdgeIndex idx_he_cb = Base::getOppositeHalfEdgeIndex(idx_he_bc);
+ const HalfEdgeIndex idx_he_ad = Base::getOppositeHalfEdgeIndex(idx_he_da);
+ const HalfEdgeIndex idx_he_ac = Base::getOppositeHalfEdgeIndex(idx_he_ca);
+
+ // Get the existing half-edges
+ const HalfEdgeIndex idx_he_ab_prev =
+ Base::getPrevHalfEdgeIndex(idx_he_ab); // No reference!
+ const HalfEdgeIndex idx_he_ab_next =
+ Base::getNextHalfEdgeIndex(idx_he_ab); // No reference!
+
+ const HalfEdgeIndex idx_he_cd_prev =
+ Base::getPrevHalfEdgeIndex(idx_he_cd); // No reference!
+ const HalfEdgeIndex idx_he_cd_next =
+ Base::getNextHalfEdgeIndex(idx_he_cd); // No reference!
+
+ // Connect the outer half-edges
+ Base::connectPrevNext(idx_he_ab_prev, idx_he_ad);
+ Base::connectPrevNext(idx_he_ad, idx_he_cd_next);
+ Base::connectPrevNext(idx_he_cd_prev, idx_he_cb);
+ Base::connectPrevNext(idx_he_cb, idx_he_ab_next);
+
+ // Connect the inner half-edges
+ Base::connectPrevNext(idx_he_ab, idx_he_bc);
+ Base::connectPrevNext(idx_he_bc, idx_he_ca);
+ Base::connectPrevNext(idx_he_ca, idx_he_ab);
+
+ Base::connectPrevNext(idx_he_ac, idx_he_cd);
+ Base::connectPrevNext(idx_he_cd, idx_he_da);
+ Base::connectPrevNext(idx_he_da, idx_he_ac);
+
+ // Connect the vertices to the boundary half-edges
+ Base::setOutgoingHalfEdgeIndex(idx_v_a, idx_he_ad);
+ Base::setOutgoingHalfEdgeIndex(idx_v_b, idx_he_ab_next);
+ Base::setOutgoingHalfEdgeIndex(idx_v_c, idx_he_cb);
+ Base::setOutgoingHalfEdgeIndex(idx_v_d, idx_he_cd_next);
+
+ // Add and connect the faces
+ HalfEdgeIndices inner_he_abc;
+ inner_he_abc.reserve(3);
+ inner_he_abc.push_back(idx_he_ab);
+ inner_he_abc.push_back(idx_he_bc);
+ inner_he_abc.push_back(idx_he_ca);
+
+ HalfEdgeIndices inner_he_acd;
+ inner_he_acd.reserve(3);
+ inner_he_acd.push_back(idx_he_ac);
+ inner_he_acd.push_back(idx_he_cd);
+ inner_he_acd.push_back(idx_he_da);
+
+ const FaceIndex idx_f_abc = Base::connectFace(inner_he_abc, face_data);
+ const FaceIndex idx_f_acd = Base::connectFace(inner_he_acd, face_data);
+
+ return (std::make_pair(idx_f_abc, idx_f_acd));
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+ // Members
+ ////////////////////////////////////////////////////////////////////////
+
+ /** \brief Storage for adding a triangle. */
+ VertexIndices add_triangle_;
+
+ /** \brief Storage for addTrianglePair. */
+ HalfEdgeIndices inner_he_atp_;
+
+ /** \brief Storage for addTrianglePair. */
+ std::vector<bool> is_new_atp_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace geometry
} // End namespace pcl
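// Illustrative usage (editor's sketch, not part of this patch): adding both
// triangles of a quad at once with addTrianglePair(). Vertex creation mirrors
// the QuadMesh sketch above and is an assumption made for illustration only.
//
//   using Mesh = pcl::geometry::TriangleMesh<pcl::geometry::DefaultMeshTraits<>>;
//   Mesh mesh;
//   Mesh::VertexIndices v;
//   for (int i = 0; i < 4; ++i)
//     v.push_back(mesh.addVertex());
//   // First index valid -> at least one triangle added; both valid -> two.
//   const Mesh::FaceIndexPair faces = mesh.addTrianglePair(v);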
if(CMAKE_COMPILER_IS_GNUCXX)
string(REPLACE "-Wold-style-cast" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "-Wno-invalid-offsetof" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-conversion -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-but-set-variable")
+ string(APPEND CMAKE_CXX_FLAGS " -Wno-conversion -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-but-set-variable")
+ # allow deprecation warnings in Eigen(3.3.7)/Core, see here: https://gitlab.kitware.com/vtk/vtk/-/issues/17661
+ string(APPEND CMAKE_CXX_FLAGS " -Wno-error=cpp")
+ # allow maybe-uninitialized warnings from the thrust library.
+ string(APPEND CMAKE_CXX_FLAGS " -Wno-error=maybe-uninitialized")
endif()
collect_subproject_directory_names("${CMAKE_CURRENT_SOURCE_DIR}" "CMakeLists.txt" PCL_GPU_MODULES_NAMES PCL_GPU_MODULES_DIRS)
#pragma once
-#include <pcl/pcl_exports.h>
#include <pcl/gpu/containers/device_memory.h>
+#include <pcl/pcl_exports.h>
#include <vector>
-namespace pcl
-{
- namespace gpu
- {
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- /** \brief @b DeviceArray class
- *
- * \note Typed container for GPU memory with reference counting.
- *
- * \author Anatoly Baksheev
- */
- template<class T>
- class PCL_EXPORTS DeviceArray : public DeviceMemory
- {
- public:
- /** \brief Element type. */
- using type = T;
-
- /** \brief Element size. */
- enum { elem_size = sizeof(T) };
-
- /** \brief Empty constructor. */
- DeviceArray();
-
- /** \brief Allocates internal buffer in GPU memory
- * \param size number of elements to allocate
- * */
- DeviceArray(std::size_t size);
-
- /** \brief Initializes with user allocated buffer. Reference counting is disabled in this case.
- * \param ptr pointer to buffer
- * \param size elements number
- * */
- DeviceArray(T *ptr, std::size_t size);
-
- /** \brief Copy constructor. Just increments reference counter. */
- DeviceArray(const DeviceArray& other);
-
- /** \brief Assignment operator. Just increments reference counter. */
- DeviceArray& operator = (const DeviceArray& other);
-
- /** \brief Allocates internal buffer in GPU memory. If internal buffer was created before the function recreates it with new size. If new and old sizes are equal it does nothing.
- * \param size elements number
- * */
- void create(std::size_t size);
-
- /** \brief Decrements reference counter and releases internal buffer if needed. */
- void release();
-
- /** \brief Performs data copying. If destination size differs it will be reallocated.
- * \param other destination container
- * */
- void copyTo(DeviceArray& other) const;
-
- /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to ensure that intenal buffer size is enough.
- * \param host_ptr pointer to buffer to upload
- * \param size elements number
- * */
- void upload(const T *host_ptr, std::size_t size);
-
- /** \brief Downloads data from internal buffer to CPU memory
- * \param host_ptr pointer to buffer to download
- * */
- void download(T *host_ptr) const;
-
- /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to ensure that intenal buffer size is enough.
- * \param data host vector to upload from
- * */
- template<class A>
- void upload(const std::vector<T, A>& data);
-
- /** \brief Downloads data from internal buffer to CPU memory
- * \param data host vector to download to
- * */
- template<typename A>
- void download(std::vector<T, A>& data) const;
-
- /** \brief Performs swap of data pointed with another device array.
- * \param other_arg device array to swap with
- * */
- void swap(DeviceArray& other_arg);
-
- /** \brief Returns pointer for internal buffer in GPU memory. */
- T* ptr();
-
- /** \brief Returns const pointer for internal buffer in GPU memory. */
- const T* ptr() const;
-
- //using DeviceMemory::ptr;
-
- /** \brief Returns pointer for internal buffer in GPU memory. */
- operator T*();
-
- /** \brief Returns const pointer for internal buffer in GPU memory. */
- operator const T*() const;
-
- /** \brief Returns size in elements. */
- std::size_t size() const;
- };
-
-
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- /** \brief @b DeviceArray2D class
- *
- * \note Typed container for pitched GPU memory with reference counting.
- *
- * \author Anatoly Baksheev
- */
- template<class T>
- class PCL_EXPORTS DeviceArray2D : public DeviceMemory2D
- {
- public:
- /** \brief Element type. */
- using type = T;
-
- /** \brief Element size. */
- enum { elem_size = sizeof(T) };
-
- /** \brief Empty constructor. */
- DeviceArray2D();
-
- /** \brief Allocates internal buffer in GPU memory
- * \param rows number of rows to allocate
- * \param cols number of elements in each row
- * */
- DeviceArray2D(int rows, int cols);
-
- /** \brief Initializes with user allocated buffer. Reference counting is disabled in this case.
- * \param rows number of rows
- * \param cols number of elements in each row
- * \param data pointer to buffer
- * \param stepBytes stride between two consecutive rows in bytes
- * */
- DeviceArray2D(int rows, int cols, void *data, std::size_t stepBytes);
-
- /** \brief Copy constructor. Just increments reference counter. */
- DeviceArray2D(const DeviceArray2D& other);
-
- /** \brief Assignment operator. Just increments reference counter. */
- DeviceArray2D& operator = (const DeviceArray2D& other);
-
- /** \brief Allocates internal buffer in GPU memory. If internal buffer was created before the function recreates it with new size. If new and old sizes are equal it does nothing.
- * \param rows number of rows to allocate
- * \param cols number of elements in each row
- * */
- void create(int rows, int cols);
-
- /** \brief Decrements reference counter and releases internal buffer if needed. */
- void release();
-
- /** \brief Performs data copying. If destination size differs it will be reallocated.
- * \param other destination container
- * */
- void copyTo(DeviceArray2D& other) const;
-
- /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to ensure that intenal buffer size is enough.
- * \param host_ptr pointer to host buffer to upload
- * \param host_step stride between two consecutive rows in bytes for host buffer
- * \param rows number of rows to upload
- * \param cols number of elements in each row
- * */
- void upload(const void *host_ptr, std::size_t host_step, int rows, int cols);
-
- /** \brief Downloads data from internal buffer to CPU memory. User is responsible for correct host buffer size.
- * \param host_ptr pointer to host buffer to download
- * \param host_step stride between two consecutive rows in bytes for host buffer
- * */
- void download(void *host_ptr, std::size_t host_step) const;
-
- /** \brief Performs swap of data pointed with another device array.
- * \param other_arg device array to swap with
- * */
- void swap(DeviceArray2D& other_arg);
-
- /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to ensure that intenal buffer size is enough.
- * \param data host vector to upload from
- * \param cols stride in elements between two consecutive rows for host buffer
- * */
- template<class A>
- void upload(const std::vector<T, A>& data, int cols);
-
- /** \brief Downloads data from internal buffer to CPU memory
- * \param data host vector to download to
- * \param cols Output stride in elements between two consecutive rows for host vector.
- * */
- template<class A>
- void download(std::vector<T, A>& data, int& cols) const;
-
- /** \brief Returns pointer to given row in internal buffer.
- * \param y row index
- * */
- T* ptr(int y = 0);
-
- /** \brief Returns const pointer to given row in internal buffer.
- * \param y row index
- * */
- const T* ptr(int y = 0) const;
-
- //using DeviceMemory2D::ptr;
-
- /** \brief Returns pointer for internal buffer in GPU memory. */
- operator T*();
-
- /** \brief Returns const pointer for internal buffer in GPU memory. */
- operator const T*() const;
-
- /** \brief Returns number of elements in each row. */
- int cols() const;
-
- /** \brief Returns number of rows. */
- int rows() const;
-
- /** \brief Returns step in elements. */
- std::size_t elem_step() const;
- };
- }
-
- namespace device
- {
- using pcl::gpu::DeviceArray;
- using pcl::gpu::DeviceArray2D;
- }
-}
+namespace pcl {
+namespace gpu {
+//////////////////////////////////////////////////////////////////////////////
+/** \brief @b DeviceArray class
+ *
+ * \note Typed container for GPU memory with reference counting.
+ *
+ * \author Anatoly Baksheev
+ */
+template <class T>
+class PCL_EXPORTS DeviceArray : public DeviceMemory {
+public:
+ /** \brief Element type. */
+ using type = T;
+
+ /** \brief Element size. */
+ enum { elem_size = sizeof(T) };
+
+ /** \brief Empty constructor. */
+ DeviceArray();
+
+ /** \brief Allocates internal buffer in GPU memory
+ * \param size number of elements to allocate
+ * */
+ DeviceArray(std::size_t size);
+
+ /** \brief Initializes with user allocated buffer. Reference counting is disabled in
+ * this case.
+ * \param ptr pointer to buffer
+ * \param size elements number
+ * */
+ DeviceArray(T* ptr, std::size_t size);
+
+ /** \brief Copy constructor. Just increments reference counter. */
+ DeviceArray(const DeviceArray& other);
+
+ /** \brief Assignment operator. Just increments reference counter. */
+ DeviceArray&
+ operator=(const DeviceArray& other);
+
+ /** \brief Allocates internal buffer in GPU memory. If internal buffer was created
+ * before the function recreates it with new size. If new and old sizes are equal it
+ * does nothing.
+ * \param size elements number
+ * */
+ void
+ create(std::size_t size);
+
+ /** \brief Decrements reference counter and releases internal buffer if needed. */
+ void
+ release();
+
+ /** \brief Performs data copying. If destination size differs it will be reallocated.
+ * \param other destination container
+ * */
+ void
+ copyTo(DeviceArray& other) const;
+
+ /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to
+ * ensure that the internal buffer is large enough.
+ * \param host_ptr pointer to buffer to upload
+ * \param size elements number
+ * */
+ void
+ upload(const T* host_ptr, std::size_t size);
+
+ /** \brief Uploads data from CPU memory to internal buffer.
+ * \return true if upload successful
+ * \note In contrast to the other upload function, this function
+ * never allocates memory.
+ * \param host_ptr pointer to buffer to upload
+ * \param device_begin_offset element offset in the device buffer at which the upload begins
+ * \param num_elements number of elements to upload, starting at device_begin_offset
+ * */
+ bool
+ upload(const T* host_ptr, std::size_t device_begin_offset, std::size_t num_elements);
+
+ /** \brief Downloads data from internal buffer to CPU memory
+ * \param host_ptr pointer to buffer to download
+ * */
+ void
+ download(T* host_ptr) const;
+
+ /** \brief Downloads data from internal buffer to CPU memory.
+ * \return true if download successful
+ * \param host_ptr pointer to buffer to download
+ * \param device_begin_offset element offset in the device buffer at which the download begins
+ * \param num_elements number of elements to download, starting at device_begin_offset
+ * */
+ bool
+ download(T* host_ptr,
+ std::size_t device_begin_offset,
+ std::size_t num_elements) const;
+
+ /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to
+ * ensure that the internal buffer is large enough.
+ * \param data host vector to upload from
+ * */
+ template <class A>
+ void
+ upload(const std::vector<T, A>& data);
+
+ /** \brief Downloads data from internal buffer to CPU memory
+ * \param data host vector to download to
+ * */
+ template <typename A>
+ void
+ download(std::vector<T, A>& data) const;
+
+ /** \brief Performs swap of data pointed with another device array.
+ * \param other_arg device array to swap with
+ * */
+ void
+ swap(DeviceArray& other_arg);
+
+ /** \brief Returns pointer for internal buffer in GPU memory. */
+ T*
+ ptr();
+
+ /** \brief Returns const pointer for internal buffer in GPU memory. */
+ const T*
+ ptr() const;
+
+ // using DeviceMemory::ptr;
+
+ /** \brief Returns pointer for internal buffer in GPU memory. */
+ operator T*();
+
+ /** \brief Returns const pointer for internal buffer in GPU memory. */
+ operator const T*() const;
+
+ /** \brief Returns size in elements. */
+ std::size_t
+ size() const;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+/** \brief @b DeviceArray2D class
+ *
+ * \note Typed container for pitched GPU memory with reference counting.
+ *
+ * \author Anatoly Baksheev
+ */
+template <class T>
+class PCL_EXPORTS DeviceArray2D : public DeviceMemory2D {
+public:
+ /** \brief Element type. */
+ using type = T;
+
+ /** \brief Element size. */
+ enum { elem_size = sizeof(T) };
+
+ /** \brief Empty constructor. */
+ DeviceArray2D();
+
+ /** \brief Allocates internal buffer in GPU memory
+ * \param rows number of rows to allocate
+ * \param cols number of elements in each row
+ * */
+ DeviceArray2D(int rows, int cols);
+
+ /** \brief Initializes with user allocated buffer. Reference counting is disabled in
+ * this case.
+ * \param rows number of rows
+ * \param cols number of elements in each row
+ * \param data pointer to buffer
+ * \param stepBytes stride between two consecutive rows in bytes
+ * */
+ DeviceArray2D(int rows, int cols, void* data, std::size_t stepBytes);
+
+ /** \brief Copy constructor. Just increments reference counter. */
+ DeviceArray2D(const DeviceArray2D& other);
+
+ /** \brief Assignment operator. Just increments reference counter. */
+ DeviceArray2D&
+ operator=(const DeviceArray2D& other);
+
+ /** \brief Allocates internal buffer in GPU memory. If internal buffer was created
+ * before the function recreates it with new size. If new and old sizes are equal it
+ * does nothing.
+ * \param rows number of rows to allocate
+ * \param cols number of elements in each row
+ * */
+ void
+ create(int rows, int cols);
+
+ /** \brief Decrements reference counter and releases internal buffer if needed. */
+ void
+ release();
+
+ /** \brief Performs data copying. If destination size differs it will be reallocated.
+ * \param other destination container
+ * */
+ void
+ copyTo(DeviceArray2D& other) const;
+
+ /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to
+ * ensure that the internal buffer is large enough.
+ * \param host_ptr pointer to host buffer to upload
+ * \param host_step stride between two consecutive rows in bytes for host buffer
+ * \param rows number of rows to upload
+ * \param cols number of elements in each row
+ * */
+ void
+ upload(const void* host_ptr, std::size_t host_step, int rows, int cols);
+
+ /** \brief Downloads data from internal buffer to CPU memory. User is responsible for
+ * correct host buffer size.
+ * \param host_ptr pointer to host buffer to download
+ * \param host_step stride between two consecutive rows in bytes for host buffer
+ * */
+ void
+ download(void* host_ptr, std::size_t host_step) const;
+
+ /** \brief Performs swap of data pointed with another device array.
+ * \param other_arg device array to swap with
+ * */
+ void
+ swap(DeviceArray2D& other_arg);
+
+ /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to
+ * ensure that the internal buffer is large enough.
+ * \param data host vector to upload from
+ * \param cols stride in elements between two consecutive rows for host buffer
+ * */
+ template <class A>
+ void
+ upload(const std::vector<T, A>& data, int cols);
+
+ /** \brief Downloads data from internal buffer to CPU memory
+ * \param data host vector to download to
+ * \param cols Output stride in elements between two consecutive rows for host vector.
+ * */
+ template <class A>
+ void
+ download(std::vector<T, A>& data, int& cols) const;
+
+ /** \brief Returns pointer to given row in internal buffer.
+ * \param y row index
+ * */
+ T*
+ ptr(int y = 0);
+
+ /** \brief Returns const pointer to given row in internal buffer.
+ * \param y row index
+ * */
+ const T*
+ ptr(int y = 0) const;
+
+ // using DeviceMemory2D::ptr;
+
+ /** \brief Returns pointer for internal buffer in GPU memory. */
+ operator T*();
+
+ /** \brief Returns const pointer for internal buffer in GPU memory. */
+ operator const T*() const;
+
+ /** \brief Returns number of elements in each row. */
+ int
+ cols() const;
+
+ /** \brief Returns number of rows. */
+ int
+ rows() const;
+
+ /** \brief Returns step in elements. */
+ std::size_t
+ elem_step() const;
+};
+} // namespace gpu
+
+namespace device {
+using pcl::gpu::DeviceArray;
+using pcl::gpu::DeviceArray2D;
+} // namespace device
+} // namespace pcl
#include <pcl/gpu/containers/impl/device_array.hpp>
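// Illustrative usage (editor's sketch, not part of this patch): round-tripping
// host data through DeviceArray via the vector-based upload()/download()
// overloads declared above. Buffer size and contents are assumptions made for
// illustration only.
//
//   std::vector<float> host(1024, 1.0f);
//   pcl::gpu::DeviceArray<float> device;
//   device.upload(host);        // allocates device storage and copies host -> GPU
//   std::vector<float> result;
//   device.download(result);    // copies GPU -> host into result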
#pragma once
-#include <pcl/pcl_exports.h>
#include <pcl/gpu/containers/kernel_containers.h>
+#include <pcl/pcl_exports.h>
+
+namespace pcl {
+namespace gpu {
+///////////////////////////////////////////////////////////////////////////////
+/** \brief @b DeviceMemory class
+ *
+ * \note This is a BLOB container class with reference counting for GPU memory.
+ *
+ * \author Anatoly Baksheev
+ */
+
+class PCL_EXPORTS DeviceMemory {
+public:
+ /** \brief Empty constructor. */
+ DeviceMemory();
+
+ /** \brief Destructor. */
+ ~DeviceMemory();
+
+ /** \brief Allocates internal buffer in GPU memory
+ * \param sizeBytes_arg amount of memory to allocate
+ * */
+ DeviceMemory(std::size_t sizeBytes_arg);
+
+ /** \brief Initializes with user allocated buffer. Reference counting is disabled in
+ * this case.
+ * \param ptr_arg pointer to buffer
+ * \param sizeBytes_arg buffer size
+ * */
+ DeviceMemory(void* ptr_arg, std::size_t sizeBytes_arg);
+
+ /** \brief Copy constructor. Just increments reference counter. */
+ DeviceMemory(const DeviceMemory& other_arg);
+
+ /** \brief Assignment operator. Just increments reference counter. */
+ DeviceMemory&
+ operator=(const DeviceMemory& other_arg);
+
+ /** \brief Allocates internal buffer in GPU memory. If internal buffer was created
+ * before the function recreates it with new size. If new and old sizes are equal it
+ * does nothing.
+ * \param sizeBytes_arg buffer size
+ * */
+ void
+ create(std::size_t sizeBytes_arg);
+
+ /** \brief Decrements reference counter and releases internal buffer if needed. */
+ void
+ release();
+
+ /** \brief Performs data copying. If destination size differs it will be reallocated.
+ * \param other destination container
+ * */
+ void
+ copyTo(DeviceMemory& other) const;
+
+ /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to
+ * ensure that the internal buffer is large enough.
+ * \param host_ptr_arg pointer to buffer to upload
+ * \param sizeBytes_arg buffer size
+ * */
+ void
+ upload(const void* host_ptr_arg, std::size_t sizeBytes_arg);
+
+ /** \brief Uploads data from CPU memory to the internal buffer in GPU memory.
+ * \note This overload never allocates memory in contrast to the
+ * other upload function.
+ * \return true if upload successful
+ * \param host_ptr pointer to buffer to upload
+ * \param device_begin_byte_offset first byte position to upload to
+ * \param num_bytes number of bytes to upload
+ * */
+ bool
+ upload(const void* host_ptr,
+ std::size_t device_begin_byte_offset,
+ std::size_t num_bytes);
+
+ /** \brief Downloads data from internal buffer to CPU memory
+ * \param host_ptr_arg pointer to buffer to download
+ * */
+ void
+ download(void* host_ptr_arg) const;
+
+ /** \brief Downloads data from internal buffer to CPU memory.
+ * \return true if download successful
+ * \param host_ptr pointer to buffer to download to
+ * \param device_begin_byte_offset first byte position to download from
+ * \param num_bytes number of bytes to download
+ * */
+ bool
+ download(void* host_ptr,
+ std::size_t device_begin_byte_offset,
+ std::size_t num_bytes) const;
+
+ /** \brief Performs swap of data pointed with another device memory.
+ * \param other_arg device memory to swap with
+ * */
+ void
+ swap(DeviceMemory& other_arg);
+
+ /** \brief Returns pointer for internal buffer in GPU memory. */
+ template <class T>
+ T*
+ ptr();
+
+ /** \brief Returns constant pointer for internal buffer in GPU memory. */
+ template <class T>
+ const T*
+ ptr() const;
+
+ /** \brief Conversion to PtrSz for passing to kernel functions. */
+ template <class U>
+ operator PtrSz<U>() const;
+
+ /** \brief Returns true if unallocated otherwise false. */
+ bool
+ empty() const;
+
+ std::size_t
+ sizeBytes() const;
+
+private:
+ /** \brief Device pointer. */
+ void* data_;
+
+ /** \brief Allocated size in bytes. */
+ std::size_t sizeBytes_;
+
+ /** \brief Pointer to reference counter in CPU memory. */
+ int* refcount_;
+};
+
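A minimal usage sketch of the class above, assuming a CUDA-capable device and using only the whole-buffer upload()/download() declared here:

#include <pcl/gpu/containers/device_memory.h>

#include <vector>

void
deviceMemoryRoundTrip()
{
  std::vector<float> host(256, 1.0f);
  pcl::gpu::DeviceMemory buf;
  // upload() calls create() internally, so the buffer is (re)allocated as needed
  buf.upload(host.data(), host.size() * sizeof(float));
  std::vector<float> back(host.size());
  // download() copies sizeBytes() bytes back into the host pointer
  buf.download(back.data());
}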
+///////////////////////////////////////////////////////////////////////////////
+/** \brief @b DeviceMemory2D class
+ *
+ * \note This is a BLOB container class with reference counting for pitched GPU memory.
+ *
+ * \author Anatoly Baksheev
+ */
-namespace pcl
-{
- namespace gpu
- {
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- /** \brief @b DeviceMemory class
- *
- * \note This is a BLOB container class with reference counting for GPU memory.
- *
- * \author Anatoly Baksheev
- */
-
- class PCL_EXPORTS DeviceMemory
- {
- public:
- /** \brief Empty constructor. */
- DeviceMemory();
-
- /** \brief Destructor. */
- ~DeviceMemory();
-
- /** \brief Allocates internal buffer in GPU memory
- * \param sizeBytes_arg amount of memory to allocate
- * */
- DeviceMemory(std::size_t sizeBytes_arg);
-
- /** \brief Initializes with user allocated buffer. Reference counting is disabled in this case.
- * \param ptr_arg pointer to buffer
- * \param sizeBytes_arg buffer size
- * */
- DeviceMemory(void *ptr_arg, std::size_t sizeBytes_arg);
-
- /** \brief Copy constructor. Just increments reference counter. */
- DeviceMemory(const DeviceMemory& other_arg);
-
- /** \brief Assignment operator. Just increments reference counter. */
- DeviceMemory& operator=(const DeviceMemory& other_arg);
-
- /** \brief Allocates internal buffer in GPU memory. If internal buffer was created before the function recreates it with new size. If new and old sizes are equal it does nothing.
- * \param sizeBytes_arg buffer size
- * */
- void create(std::size_t sizeBytes_arg);
-
- /** \brief Decrements reference counter and releases internal buffer if needed. */
- void release();
-
- /** \brief Performs data copying. If destination size differs it will be reallocated.
- * \param other destination container
- * */
- void copyTo(DeviceMemory& other) const;
-
- /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to ensure that intenal buffer size is enough.
- * \param host_ptr_arg pointer to buffer to upload
- * \param sizeBytes_arg buffer size
- * */
- void upload(const void *host_ptr_arg, std::size_t sizeBytes_arg);
-
- /** \brief Downloads data from internal buffer to CPU memory
- * \param host_ptr_arg pointer to buffer to download
- * */
- void download(void *host_ptr_arg) const;
-
- /** \brief Performs swap of data pointed with another device memory.
- * \param other_arg device memory to swap with
- * */
- void swap(DeviceMemory& other_arg);
-
- /** \brief Returns pointer for internal buffer in GPU memory. */
- template<class T> T* ptr();
-
- /** \brief Returns constant pointer for internal buffer in GPU memory. */
- template<class T> const T* ptr() const;
-
- /** \brief Conversion to PtrSz for passing to kernel functions. */
- template <class U> operator PtrSz<U>() const;
-
- /** \brief Returns true if unallocated otherwise false. */
- bool empty() const;
-
- std::size_t sizeBytes() const;
-
- private:
- /** \brief Device pointer. */
- void *data_;
-
- /** \brief Allocated size in bytes. */
- std::size_t sizeBytes_;
-
- /** \brief Pointer to reference counter in CPU memory. */
- int* refcount_;
- };
-
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- /** \brief @b DeviceMemory2D class
- *
- * \note This is a BLOB container class with reference counting for pitched GPU memory.
- *
- * \author Anatoly Baksheev
- */
-
- class PCL_EXPORTS DeviceMemory2D
- {
- public:
- /** \brief Empty constructor. */
- DeviceMemory2D();
-
- /** \brief Destructor. */
- ~DeviceMemory2D();
-
- /** \brief Allocates internal buffer in GPU memory
- * \param rows_arg number of rows to allocate
- * \param colsBytes_arg width of the buffer in bytes
- * */
- DeviceMemory2D(int rows_arg, int colsBytes_arg);
-
-
- /** \brief Initializes with user allocated buffer. Reference counting is disabled in this case.
- * \param rows_arg number of rows
- * \param colsBytes_arg width of the buffer in bytes
- * \param data_arg pointer to buffer
- * \param step_arg stride between two consecutive rows in bytes
- * */
- DeviceMemory2D(int rows_arg, int colsBytes_arg, void *data_arg, std::size_t step_arg);
-
- /** \brief Copy constructor. Just increments reference counter. */
- DeviceMemory2D(const DeviceMemory2D& other_arg);
-
- /** \brief Assignment operator. Just increments reference counter. */
- DeviceMemory2D& operator=(const DeviceMemory2D& other_arg);
-
- /** \brief Allocates internal buffer in GPU memory. If internal buffer was created before the function recreates it with new size. If new and old sizes are equal it does nothing.
- * \param rows_arg number of rows to allocate
- * \param colsBytes_arg width of the buffer in bytes
- * */
- void create(int rows_arg, int colsBytes_arg);
-
- /** \brief Decrements reference counter and releases internal buffer if needed. */
- void release();
-
- /** \brief Performs data copying. If destination size differs it will be reallocated.
- * \param other destination container
- * */
- void copyTo(DeviceMemory2D& other) const;
-
- /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to ensure that intenal buffer size is enough.
- * \param host_ptr_arg pointer to host buffer to upload
- * \param host_step_arg stride between two consecutive rows in bytes for host buffer
- * \param rows_arg number of rows to upload
- * \param colsBytes_arg width of host buffer in bytes
- * */
- void upload(const void *host_ptr_arg, std::size_t host_step_arg, int rows_arg, int colsBytes_arg);
-
- /** \brief Downloads data from internal buffer to CPU memory. User is responsible for correct host buffer size.
- * \param host_ptr_arg pointer to host buffer to download
- * \param host_step_arg stride between two consecutive rows in bytes for host buffer
- * */
- void download(void *host_ptr_arg, std::size_t host_step_arg) const;
-
- /** \brief Performs swap of data pointed with another device memory.
- * \param other_arg device memory to swap with
- * */
- void swap(DeviceMemory2D& other_arg);
-
- /** \brief Returns pointer to given row in internal buffer.
- * \param y_arg row index
- * */
- template<class T> T* ptr(int y_arg = 0);
-
- /** \brief Returns constant pointer to given row in internal buffer.
- * \param y_arg row index
- * */
- template<class T> const T* ptr(int y_arg = 0) const;
-
- /** \brief Conversion to PtrStep for passing to kernel functions. */
- template <class U> operator PtrStep<U>() const;
-
- /** \brief Conversion to PtrStepSz for passing to kernel functions. */
- template <class U> operator PtrStepSz<U>() const;
-
- /** \brief Returns true if unallocated otherwise false. */
- bool empty() const;
-
- /** \brief Returns number of bytes in each row. */
- int colsBytes() const;
-
- /** \brief Returns number of rows. */
- int rows() const;
-
- /** \brief Returns stride between two consecutive rows in bytes for internal buffer. Step is stored always and everywhere in bytes!!! */
- std::size_t step() const;
- private:
- /** \brief Device pointer. */
- void *data_;
-
- /** \brief Stride between two consecutive rows in bytes for internal buffer. Step is stored always and everywhere in bytes!!! */
- std::size_t step_;
-
- /** \brief Width of the buffer in bytes. */
- int colsBytes_;
-
- /** \brief Number of rows. */
- int rows_;
-
- /** \brief Pointer to reference counter in CPU memory. */
- int* refcount_;
- };
- }
-
- namespace device
- {
- using pcl::gpu::DeviceMemory;
- using pcl::gpu::DeviceMemory2D;
- }
-}
+class PCL_EXPORTS DeviceMemory2D {
+public:
+ /** \brief Empty constructor. */
+ DeviceMemory2D();
+
+ /** \brief Destructor. */
+ ~DeviceMemory2D();
+
+ /** \brief Allocates internal buffer in GPU memory
+ * \param rows_arg number of rows to allocate
+ * \param colsBytes_arg width of the buffer in bytes
+ * */
+ DeviceMemory2D(int rows_arg, int colsBytes_arg);
+
+ /** \brief Initializes with user allocated buffer. Reference counting is disabled in
+ * this case.
+ * \param rows_arg number of rows
+ * \param colsBytes_arg width of the buffer in bytes
+ * \param data_arg pointer to buffer
+ * \param step_arg stride between two consecutive rows in bytes
+ * */
+ DeviceMemory2D(int rows_arg, int colsBytes_arg, void* data_arg, std::size_t step_arg);
+
+ /** \brief Copy constructor. Just increments reference counter. */
+ DeviceMemory2D(const DeviceMemory2D& other_arg);
+
+ /** \brief Assignment operator. Just increments reference counter. */
+ DeviceMemory2D&
+ operator=(const DeviceMemory2D& other_arg);
+
+ /** \brief Allocates internal buffer in GPU memory. If the internal buffer was created
+ * before, the function recreates it with the new size. If the new and old sizes are
+ * equal it does nothing.
+ * \param rows_arg number of rows to allocate
+ * \param colsBytes_arg width of the buffer in bytes
+ * */
+ void
+ create(int rows_arg, int colsBytes_arg);
+
+ /** \brief Decrements reference counter and releases internal buffer if needed. */
+ void
+ release();
+
+ /** \brief Performs data copying. If destination size differs it will be reallocated.
+ * \param other destination container
+ * */
+ void
+ copyTo(DeviceMemory2D& other) const;
+
+ /** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to
+ * ensure that the internal buffer size is enough.
+ * \param host_ptr_arg pointer to host buffer to upload
+ * \param host_step_arg stride between two consecutive rows in bytes for host buffer
+ * \param rows_arg number of rows to upload
+ * \param colsBytes_arg width of host buffer in bytes
+ * */
+ void
+ upload(const void* host_ptr_arg,
+ std::size_t host_step_arg,
+ int rows_arg,
+ int colsBytes_arg);
+
+ /** \brief Downloads data from internal buffer to CPU memory. User is responsible for
+ * correct host buffer size.
+ * \param host_ptr_arg pointer to host buffer to download
+ * \param host_step_arg stride between two consecutive rows in bytes for host buffer
+ * */
+ void
+ download(void* host_ptr_arg, std::size_t host_step_arg) const;
+
+ /** \brief Performs swap of data pointed with another device memory.
+ * \param other_arg device memory to swap with
+ * */
+ void
+ swap(DeviceMemory2D& other_arg);
+
+ /** \brief Returns pointer to given row in internal buffer.
+ * \param y_arg row index
+ * */
+ template <class T>
+ T*
+ ptr(int y_arg = 0);
+
+ /** \brief Returns constant pointer to given row in internal buffer.
+ * \param y_arg row index
+ * */
+ template <class T>
+ const T*
+ ptr(int y_arg = 0) const;
+
+ /** \brief Conversion to PtrStep for passing to kernel functions. */
+ template <class U>
+ operator PtrStep<U>() const;
+
+ /** \brief Conversion to PtrStepSz for passing to kernel functions. */
+ template <class U>
+ operator PtrStepSz<U>() const;
+
+ /** \brief Returns true if unallocated otherwise false. */
+ bool
+ empty() const;
+
+ /** \brief Returns number of bytes in each row. */
+ int
+ colsBytes() const;
+
+ /** \brief Returns number of rows. */
+ int
+ rows() const;
+
+ /** \brief Returns stride between two consecutive rows in bytes for internal buffer.
+ * The step is always stored in bytes. */
+ std::size_t
+ step() const;
+
+private:
+ /** \brief Device pointer. */
+ void* data_;
+
+ /** \brief Stride between two consecutive rows in bytes for internal buffer. The step
+ * is always stored in bytes. */
+ std::size_t step_;
+
+ /** \brief Width of the buffer in bytes. */
+ int colsBytes_;
+
+ /** \brief Number of rows. */
+ int rows_;
+
+ /** \brief Pointer to reference counter in CPU memory. */
+ int* refcount_;
+};
+} // namespace gpu
+
+namespace device {
+using pcl::gpu::DeviceMemory;
+using pcl::gpu::DeviceMemory2D;
+} // namespace device
+} // namespace pcl
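A minimal sketch of the pitched 2D container above, assuming a device is available; step() may exceed colsBytes() because cudaMallocPitch pads rows:

#include <pcl/gpu/containers/device_memory.h>

#include <vector>

void
deviceMemory2DRoundTrip()
{
  const int rows = 480;
  const int cols_bytes = 640; // one byte per pixel in this sketch
  std::vector<unsigned char> host(rows * cols_bytes, 0);

  pcl::gpu::DeviceMemory2D img;
  // the host image is tightly packed, so its row step equals cols_bytes
  img.upload(host.data(), cols_bytes, rows, cols_bytes);
  img.download(host.data(), cols_bytes);
}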
#include <pcl/gpu/containers/impl/device_memory.hpp>
* Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
*/
-
#ifndef PCL_GPU_CONTAINER_DEVICE_ARRAY_IMPL_HPP_
#define PCL_GPU_CONTAINER_DEVICE_ARRAY_IMPL_HPP_
+namespace pcl {
+
+namespace gpu {
+
+//////////////////// Inline implementations of DeviceArray //////////////////
+
+template <class T>
+inline DeviceArray<T>::DeviceArray()
+{}
+
+template <class T>
+inline DeviceArray<T>::DeviceArray(std::size_t size) : DeviceMemory(size * elem_size)
+{}
+
+template <class T>
+inline DeviceArray<T>::DeviceArray(T* ptr, std::size_t size)
+: DeviceMemory(ptr, size * elem_size)
+{}
+
+template <class T>
+inline DeviceArray<T>::DeviceArray(const DeviceArray& other) : DeviceMemory(other)
+{}
+
+template <class T>
+inline DeviceArray<T>&
+DeviceArray<T>::operator=(const DeviceArray& other)
+{
+ DeviceMemory::operator=(other);
+ return *this;
+}
+
+template <class T>
+inline void
+DeviceArray<T>::create(std::size_t size)
+{
+ DeviceMemory::create(size * elem_size);
+}
+
+template <class T>
+inline void
+DeviceArray<T>::release()
+{
+ DeviceMemory::release();
+}
+
+template <class T>
+inline void
+DeviceArray<T>::copyTo(DeviceArray& other) const
+{
+ DeviceMemory::copyTo(other);
+}
+
+template <class T>
+inline void
+DeviceArray<T>::upload(const T* host_ptr, std::size_t size)
+{
+ DeviceMemory::upload(host_ptr, size * elem_size);
+}
+
+template <class T>
+inline bool
+DeviceArray<T>::upload(const T* host_ptr,
+ std::size_t device_begin_offset,
+ std::size_t num_elements)
+{
+ std::size_t begin_byte_offset = device_begin_offset * sizeof(T);
+ std::size_t num_bytes = num_elements * sizeof(T);
+ return DeviceMemory::upload(host_ptr, begin_byte_offset, num_bytes);
+}
+
+template <class T>
+inline void
+DeviceArray<T>::download(T* host_ptr) const
+{
+ DeviceMemory::download(host_ptr);
+}
+
+template <class T>
+inline bool
+DeviceArray<T>::download(T* host_ptr,
+ std::size_t device_begin_offset,
+ std::size_t num_elements) const
+{
+ std::size_t begin_byte_offset = device_begin_offset * sizeof(T);
+ std::size_t num_bytes = num_elements * sizeof(T);
+ return DeviceMemory::download(host_ptr, begin_byte_offset, num_bytes);
+}
+
+template <class T>
+void
+DeviceArray<T>::swap(DeviceArray& other_arg)
+{
+ DeviceMemory::swap(other_arg);
+}
+
+template <class T>
+inline DeviceArray<T>::operator T*()
+{
+ return ptr();
+}
+
+template <class T>
+inline DeviceArray<T>::operator const T*() const
+{
+ return ptr();
+}
-namespace pcl
+template <class T>
+inline std::size_t
+DeviceArray<T>::size() const
{
+ return sizeBytes() / elem_size;
+}
-namespace gpu
+template <class T>
+inline T*
+DeviceArray<T>::ptr()
{
+ return DeviceMemory::ptr<T>();
+}
-///////////////////// Inline implementations of DeviceArray ////////////////////////////////////////////
+template <class T>
+inline const T*
+DeviceArray<T>::ptr() const
+{
+ return DeviceMemory::ptr<T>();
+}
-template<class T> inline DeviceArray<T>::DeviceArray() {}
-template<class T> inline DeviceArray<T>::DeviceArray(std::size_t size) : DeviceMemory(size * elem_size) {}
-template<class T> inline DeviceArray<T>::DeviceArray(T *ptr, std::size_t size) : DeviceMemory(ptr, size * elem_size) {}
-template<class T> inline DeviceArray<T>::DeviceArray(const DeviceArray& other) : DeviceMemory(other) {}
-template<class T> inline DeviceArray<T>& DeviceArray<T>::operator=(const DeviceArray& other)
-{ DeviceMemory::operator=(other); return *this; }
+template <class T>
+template <class A>
+inline void
+DeviceArray<T>::upload(const std::vector<T, A>& data)
+{
+ upload(&data[0], data.size());
+}
-template<class T> inline void DeviceArray<T>::create(std::size_t size)
-{ DeviceMemory::create(size * elem_size); }
-template<class T> inline void DeviceArray<T>::release()
-{ DeviceMemory::release(); }
+template <class T>
+template <class A>
+inline void
+DeviceArray<T>::download(std::vector<T, A>& data) const
+{
+ data.resize(size());
+ if (!data.empty())
+ download(&data[0]);
+}
-template<class T> inline void DeviceArray<T>::copyTo(DeviceArray& other) const
-{ DeviceMemory::copyTo(other); }
-template<class T> inline void DeviceArray<T>::upload(const T *host_ptr, std::size_t size)
-{ DeviceMemory::upload(host_ptr, size * elem_size); }
-template<class T> inline void DeviceArray<T>::download(T *host_ptr) const
-{ DeviceMemory::download( host_ptr ); }
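A quick sketch of the element-wise wrapper implemented above, assuming a GPU is present; DeviceArray<T> forwards to DeviceMemory with sizes scaled by sizeof(T):

#include <pcl/gpu/containers/device_array.h>

#include <vector>

void
deviceArrayRoundTrip()
{
  std::vector<float> host(1024, 0.5f);
  pcl::gpu::DeviceArray<float> arr;
  arr.upload(host); // allocates host.size() elements and copies them
  std::vector<float> back;
  arr.download(back); // resizes back to arr.size() and copies the data out
}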
+/////////////////// Inline implementations of DeviceArray2D //////////////////
-template<class T> void DeviceArray<T>::swap(DeviceArray& other_arg) { DeviceMemory::swap(other_arg); }
+template <class T>
+inline DeviceArray2D<T>::DeviceArray2D()
+{}
-template<class T> inline DeviceArray<T>::operator T*() { return ptr(); }
-template<class T> inline DeviceArray<T>::operator const T*() const { return ptr(); }
-template<class T> inline std::size_t DeviceArray<T>::size() const { return sizeBytes() / elem_size; }
+template <class T>
+inline DeviceArray2D<T>::DeviceArray2D(int rows, int cols)
+: DeviceMemory2D(rows, cols * elem_size)
+{}
-template<class T> inline T* DeviceArray<T>::ptr() { return DeviceMemory::ptr<T>(); }
-template<class T> inline const T* DeviceArray<T>::ptr() const { return DeviceMemory::ptr<T>(); }
+template <class T>
+inline DeviceArray2D<T>::DeviceArray2D(int rows,
+ int cols,
+ void* data,
+ std::size_t stepBytes)
+: DeviceMemory2D(rows, cols * elem_size, data, stepBytes)
+{}
-template<class T> template<class A> inline void DeviceArray<T>::upload(const std::vector<T, A>& data) { upload(&data[0], data.size()); }
-template<class T> template<class A> inline void DeviceArray<T>::download(std::vector<T, A>& data) const { data.resize(size()); if (!data.empty()) download(&data[0]); }
+template <class T>
+inline DeviceArray2D<T>::DeviceArray2D(const DeviceArray2D& other)
+: DeviceMemory2D(other)
+{}
-///////////////////// Inline implementations of DeviceArray2D ////////////////////////////////////////////
+template <class T>
+inline DeviceArray2D<T>&
+DeviceArray2D<T>::operator=(const DeviceArray2D& other)
+{
+ DeviceMemory2D::operator=(other);
+ return *this;
+}
-template<class T> inline DeviceArray2D<T>::DeviceArray2D() {}
-template<class T> inline DeviceArray2D<T>::DeviceArray2D(int rows, int cols) : DeviceMemory2D(rows, cols * elem_size) {}
-template<class T> inline DeviceArray2D<T>::DeviceArray2D(int rows, int cols, void *data, std::size_t stepBytes) : DeviceMemory2D(rows, cols * elem_size, data, stepBytes) {}
-template<class T> inline DeviceArray2D<T>::DeviceArray2D(const DeviceArray2D& other) : DeviceMemory2D(other) {}
-template<class T> inline DeviceArray2D<T>& DeviceArray2D<T>::operator=(const DeviceArray2D& other)
-{ DeviceMemory2D::operator=(other); return *this; }
+template <class T>
+inline void
+DeviceArray2D<T>::create(int rows, int cols)
+{
+ DeviceMemory2D::create(rows, cols * elem_size);
+}
-template<class T> inline void DeviceArray2D<T>::create(int rows, int cols)
-{ DeviceMemory2D::create(rows, cols * elem_size); }
-template<class T> inline void DeviceArray2D<T>::release()
-{ DeviceMemory2D::release(); }
+template <class T>
+inline void
+DeviceArray2D<T>::release()
+{
+ DeviceMemory2D::release();
+}
-template<class T> inline void DeviceArray2D<T>::copyTo(DeviceArray2D& other) const
-{ DeviceMemory2D::copyTo(other); }
-template<class T> inline void DeviceArray2D<T>::upload(const void *host_ptr, std::size_t host_step, int rows, int cols)
-{ DeviceMemory2D::upload(host_ptr, host_step, rows, cols * elem_size); }
-template<class T> inline void DeviceArray2D<T>::download(void *host_ptr, std::size_t host_step) const
-{ DeviceMemory2D::download( host_ptr, host_step ); }
+template <class T>
+inline void
+DeviceArray2D<T>::copyTo(DeviceArray2D& other) const
+{
+ DeviceMemory2D::copyTo(other);
+}
-template<class T> template<class A> inline void DeviceArray2D<T>::upload(const std::vector<T, A>& data, int cols)
-{ upload(&data[0], cols * elem_size, data.size()/cols, cols); }
+template <class T>
+inline void
+DeviceArray2D<T>::upload(const void* host_ptr,
+ std::size_t host_step,
+ int rows,
+ int cols)
+{
+ DeviceMemory2D::upload(host_ptr, host_step, rows, cols * elem_size);
+}
-template<class T> template<class A> inline void DeviceArray2D<T>::download(std::vector<T, A>& data, int& elem_step) const
-{ elem_step = cols(); data.resize(cols() * rows()); if (!data.empty()) download(&data[0], colsBytes()); }
+template <class T>
+inline void
+DeviceArray2D<T>::download(void* host_ptr, std::size_t host_step) const
+{
+ DeviceMemory2D::download(host_ptr, host_step);
+}
-template<class T> void DeviceArray2D<T>::swap(DeviceArray2D& other_arg) { DeviceMemory2D::swap(other_arg); }
+template <class T>
+template <class A>
+inline void
+DeviceArray2D<T>::upload(const std::vector<T, A>& data, int cols)
+{
+ upload(&data[0], cols * elem_size, data.size() / cols, cols);
+}
-template<class T> inline T* DeviceArray2D<T>::ptr(int y) { return DeviceMemory2D::ptr<T>(y); }
-template<class T> inline const T* DeviceArray2D<T>::ptr(int y) const { return DeviceMemory2D::ptr<T>(y); }
+template <class T>
+template <class A>
+inline void
+DeviceArray2D<T>::download(std::vector<T, A>& data, int& elem_step) const
+{
+ elem_step = cols();
+ data.resize(cols() * rows());
+ if (!data.empty())
+ download(&data[0], colsBytes());
+}
-template<class T> inline DeviceArray2D<T>::operator T*() { return ptr(); }
-template<class T> inline DeviceArray2D<T>::operator const T*() const { return ptr(); }
+template <class T>
+void
+DeviceArray2D<T>::swap(DeviceArray2D& other_arg)
+{
+ DeviceMemory2D::swap(other_arg);
+}
-template<class T> inline int DeviceArray2D<T>::cols() const { return DeviceMemory2D::colsBytes()/elem_size; }
-template<class T> inline int DeviceArray2D<T>::rows() const { return DeviceMemory2D::rows(); }
+template <class T>
+inline T*
+DeviceArray2D<T>::ptr(int y)
+{
+ return DeviceMemory2D::ptr<T>(y);
+}
+
+template <class T>
+inline const T*
+DeviceArray2D<T>::ptr(int y) const
+{
+ return DeviceMemory2D::ptr<T>(y);
+}
-template<class T> inline std::size_t DeviceArray2D<T>::elem_step() const { return DeviceMemory2D::step()/elem_size; }
+template <class T>
+inline DeviceArray2D<T>::operator T*()
+{
+ return ptr();
+}
+
+template <class T>
+inline DeviceArray2D<T>::operator const T*() const
+{
+ return ptr();
+}
+
+template <class T>
+inline int
+DeviceArray2D<T>::cols() const
+{
+ return DeviceMemory2D::colsBytes() / elem_size;
+}
+
+template <class T>
+inline int
+DeviceArray2D<T>::rows() const
+{
+ return DeviceMemory2D::rows();
+}
+
+template <class T>
+inline std::size_t
+DeviceArray2D<T>::elem_step() const
+{
+ return DeviceMemory2D::step() / elem_size;
+}
} // namespace gpu
} // namespace pcl
#endif /* PCL_GPU_CONTAINER_DEVICE_ARRAY_IMPL_HPP_ */
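A matching sketch for the 2D wrapper, assuming a device; cols() and elem_step() are expressed in elements, while colsBytes() and step() remain in bytes:

#include <pcl/gpu/containers/device_array.h>

#include <vector>

void
deviceArray2DRoundTrip()
{
  std::vector<float> host(480 * 640, 0.0f);
  pcl::gpu::DeviceArray2D<float> map;
  map.upload(host, 640); // 640 elements per row, 480 rows inferred from size()
  int elem_step = 0;
  std::vector<float> back;
  map.download(back, elem_step); // elem_step receives cols() on return
}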
-
#ifndef PCL_GPU_CONTAINER_DEVICE_MEMORY_IMPL_HPP_
#define PCL_GPU_CONTAINER_DEVICE_MEMORY_IMPL_HPP_
-namespace pcl
-{
+namespace pcl {
-namespace gpu
-{
+namespace gpu {
-///////////////////// Inline implementations of DeviceMemory ////////////////////////////////////////////
-template<class T> inline T*
+//////////////////// Inline implementations of DeviceMemory //////////////////
+template <class T>
+inline T*
DeviceMemory::ptr()
{
return (T*)data_;
}
-template<class T> inline const T*
+template <class T>
+inline const T*
DeviceMemory::ptr() const
{
return (const T*)data_;
}
-template <class U> inline DeviceMemory::operator
-PtrSz<U>() const
+template <class U>
+inline DeviceMemory::operator PtrSz<U>() const
{
- PtrSz<U> result;
- result.data = (U*)ptr<U>();
- result.size = sizeBytes_/sizeof(U);
- return result;
+ PtrSz<U> result;
+ result.data = (U*)ptr<U>();
+ result.size = sizeBytes_ / sizeof(U);
+ return result;
}
-///////////////////// Inline implementations of DeviceMemory2D ////////////////////////////////////////////
-template<class T> T*
+//////////////////// Inline implementations of DeviceMemory2D ////////////////
+template <class T>
+T*
DeviceMemory2D::ptr(int y_arg)
{
return (T*)((char*)data_ + y_arg * step_);
}
-template<class T> const T*
+template <class T>
+const T*
DeviceMemory2D::ptr(int y_arg) const
{
return (const T*)((const char*)data_ + y_arg * step_);
}
-template <class U> DeviceMemory2D::operator
-PtrStep<U>() const
+template <class U>
+DeviceMemory2D::operator PtrStep<U>() const
{
- PtrStep<U> result;
- result.data = (U*)ptr<U>();
- result.step = step_;
- return result;
+ PtrStep<U> result;
+ result.data = (U*)ptr<U>();
+ result.step = step_;
+ return result;
}
-template <class U> DeviceMemory2D::operator
-PtrStepSz<U>() const
+template <class U>
+DeviceMemory2D::operator PtrStepSz<U>() const
{
- PtrStepSz<U> result;
- result.data = (U*)ptr<U>();
- result.step = step_;
- result.cols = colsBytes_/sizeof(U);
- result.rows = rows_;
- return result;
+ PtrStepSz<U> result;
+ result.data = (U*)ptr<U>();
+ result.step = step_;
+ result.cols = colsBytes_ / sizeof(U);
+ result.rows = rows_;
+ return result;
}
} // namespace gpu
#pragma once
#include <pcl/pcl_exports.h>
+
#include <string>
-namespace pcl
-{
- namespace gpu
- {
- /** \brief Returns number of Cuda device. */
- PCL_EXPORTS int getCudaEnabledDeviceCount();
+namespace pcl {
+namespace gpu {
+/** \brief Returns the number of CUDA-enabled devices. */
+PCL_EXPORTS int
+getCudaEnabledDeviceCount();
- /** \brief Sets active device to work with. */
- PCL_EXPORTS void setDevice(int device);
+/** \brief Sets active device to work with. */
+PCL_EXPORTS void
+setDevice(int device);
- /** \brief Return device name for given device. */
- PCL_EXPORTS std::string getDeviceName(int device);
+/** \brief Returns the device name for the given device. */
+PCL_EXPORTS std::string
+getDeviceName(int device);
- /** \brief Prints information about given cuda device or about all devices
- * \param device: if < 0 prints info for all devices, otherwise the function interprets it as device id.
- */
- void PCL_EXPORTS printCudaDeviceInfo(int device = -1);
+/** \brief Prints information about the given CUDA device or about all devices
+ * \param device: if < 0 prints info for all devices, otherwise the function interprets
+ * it as device id.
+ */
+void PCL_EXPORTS
+printCudaDeviceInfo(int device = -1);
- /** \brief Prints information about given cuda device or about all devices
- * \param device: if < 0 prints info for all devices, otherwise the function interprets it as device id.
- */
- void PCL_EXPORTS printShortCudaDeviceInfo(int device = -1);
+/** \brief Prints information about the given CUDA device or about all devices
+ * \param device: if < 0 prints info for all devices, otherwise the function interprets
+ * it as device id.
+ */
+void PCL_EXPORTS
+printShortCudaDeviceInfo(int device = -1);
- /** \brief Returns true if pre-Fermi generator GPU.
- * \param device: device id to check, if < 0 checks current device.
- */
- bool PCL_EXPORTS checkIfPreFermiGPU(int device = -1);
+/** \brief Returns true if the GPU is of pre-Fermi generation.
+ * \param device: device id to check, if < 0 checks current device.
+ */
+bool PCL_EXPORTS
+checkIfPreFermiGPU(int device = -1);
- /** \brief Error handler. All GPU functions call this to report an error. For internal use only */
- void PCL_EXPORTS error(const char *error_string, const char *file, const int line, const char *func = "");
- }
-}
+/** \brief Error handler. All GPU functions call this to report an error. For internal
+ * use only. */
+void PCL_EXPORTS
+error(const char* error_string,
+ const char* file,
+ const int line,
+ const char* func = "");
+} // namespace gpu
+} // namespace pcl
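A small sketch of typical start-up use of these declarations, assuming device index 0 is acceptable and keeping error handling minimal:

#include <pcl/gpu/containers/initialization.h>

#include <iostream>

int
selectFirstDevice()
{
  const int count = pcl::gpu::getCudaEnabledDeviceCount();
  if (count < 1)
    return -1; // no device, or -1 from the query means an insufficient driver
  pcl::gpu::setDevice(0);
  pcl::gpu::printShortCudaDeviceInfo(0);
  std::cout << "Using " << pcl::gpu::getDeviceName(0) << std::endl;
  return 0;
}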
#pragma once
-#if defined(__CUDACC__)
- #define __PCL_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
+#if defined(__CUDACC__)
+#define __PCL_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
#else
- #define __PCL_GPU_HOST_DEVICE__
-#endif
+#define __PCL_GPU_HOST_DEVICE__
+#endif
#include <cstddef>
-namespace pcl
-{
- namespace gpu
- {
- template<typename T> struct DevPtr
- {
- using elem_type = T;
- const static std::size_t elem_size = sizeof(elem_type);
-
- T* data;
-
- __PCL_GPU_HOST_DEVICE__ DevPtr() : data(nullptr) {}
- __PCL_GPU_HOST_DEVICE__ DevPtr(T* data_arg) : data(data_arg) {}
-
- __PCL_GPU_HOST_DEVICE__ std::size_t elemSize() const { return elem_size; }
- __PCL_GPU_HOST_DEVICE__ operator T*() { return data; }
- __PCL_GPU_HOST_DEVICE__ operator const T*() const { return data; }
- };
-
- template<typename T> struct PtrSz : public DevPtr<T>
- {
- __PCL_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
- __PCL_GPU_HOST_DEVICE__ PtrSz(T* data_arg, std::size_t size_arg) : DevPtr<T>(data_arg), size(size_arg) {}
-
- std::size_t size;
- };
-
- template<typename T> struct PtrStep : public DevPtr<T>
- {
- __PCL_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
- __PCL_GPU_HOST_DEVICE__ PtrStep(T* data_arg, std::size_t step_arg) : DevPtr<T>(data_arg), step(step_arg) {}
-
- /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */
- std::size_t step;
-
- __PCL_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
- __PCL_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
-
- __PCL_GPU_HOST_DEVICE__ T& operator()(int y, int x) { return ptr(y)[x]; }
- __PCL_GPU_HOST_DEVICE__ const T& operator()(int y, int x) const { return ptr(y)[x]; }
- };
-
- template <typename T> struct PtrStepSz : public PtrStep<T>
- {
- __PCL_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
- __PCL_GPU_HOST_DEVICE__ PtrStepSz(int rows_arg, int cols_arg, T* data_arg, std::size_t step_arg)
- : PtrStep<T>(data_arg, step_arg), cols(cols_arg), rows(rows_arg) {}
-
- int cols;
- int rows;
- };
- }
-
- namespace device
- {
- using pcl::gpu::PtrSz;
- using pcl::gpu::PtrStep;
- using pcl::gpu::PtrStepSz;
- }
-}
+namespace pcl {
+namespace gpu {
+template <typename T>
+struct DevPtr {
+ using elem_type = T;
+ const static std::size_t elem_size = sizeof(elem_type);
+
+ T* data;
+
+ __PCL_GPU_HOST_DEVICE__
+ DevPtr() : data(nullptr) {}
+
+ __PCL_GPU_HOST_DEVICE__
+ DevPtr(T* data_arg) : data(data_arg) {}
+
+ __PCL_GPU_HOST_DEVICE__ std::size_t
+ elemSize() const
+ {
+ return elem_size;
+ }
+
+ __PCL_GPU_HOST_DEVICE__
+ operator T*() { return data; }
+ __PCL_GPU_HOST_DEVICE__ operator const T*() const { return data; }
+};
+
+template <typename T>
+struct PtrSz : public DevPtr<T> {
+ __PCL_GPU_HOST_DEVICE__
+ PtrSz() : size(0) {}
+
+ __PCL_GPU_HOST_DEVICE__
+ PtrSz(T* data_arg, std::size_t size_arg) : DevPtr<T>(data_arg), size(size_arg) {}
+
+ std::size_t size;
+};
+
+template <typename T>
+struct PtrStep : public DevPtr<T> {
+ __PCL_GPU_HOST_DEVICE__
+ PtrStep() : step(0) {}
+
+ __PCL_GPU_HOST_DEVICE__
+ PtrStep(T* data_arg, std::size_t step_arg) : DevPtr<T>(data_arg), step(step_arg) {}
+
+ /** \brief Stride between two consecutive rows in bytes. The step is always stored
+ * in bytes. */
+ std::size_t step;
+
+ __PCL_GPU_HOST_DEVICE__ T*
+ ptr(int y = 0)
+ {
+ return (T*)((char*)DevPtr<T>::data + y * step);
+ }
+
+ __PCL_GPU_HOST_DEVICE__ const T*
+ ptr(int y = 0) const
+ {
+ return (const T*)((const char*)DevPtr<T>::data + y * step);
+ }
+
+ __PCL_GPU_HOST_DEVICE__ T&
+ operator()(int y, int x)
+ {
+ return ptr(y)[x];
+ }
+
+ __PCL_GPU_HOST_DEVICE__ const T&
+ operator()(int y, int x) const
+ {
+ return ptr(y)[x];
+ }
+};
+
+template <typename T>
+struct PtrStepSz : public PtrStep<T> {
+ __PCL_GPU_HOST_DEVICE__
+ PtrStepSz() : cols(0), rows(0) {}
+
+ __PCL_GPU_HOST_DEVICE__
+ PtrStepSz(int rows_arg, int cols_arg, T* data_arg, std::size_t step_arg)
+ : PtrStep<T>(data_arg, step_arg), cols(cols_arg), rows(rows_arg)
+ {}
+
+ int cols;
+ int rows;
+};
+} // namespace gpu
+
+namespace device {
+using pcl::gpu::PtrStep;
+using pcl::gpu::PtrStepSz;
+using pcl::gpu::PtrSz;
+} // namespace device
+} // namespace pcl
#undef __PCL_GPU_HOST_DEVICE__
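A sketch of why these lightweight structs exist, assuming compilation with nvcc and the usual kernel_containers header path: a DeviceArray2D<float> passed as a kernel argument binds to PtrStepSz<float> through the conversion operator declared earlier, and operator() applies the byte step per row.

#include <pcl/gpu/containers/kernel_containers.h>

__global__ void
scaleKernel(pcl::gpu::PtrStepSz<float> img, float s)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < img.cols && y < img.rows)
    img(y, x) *= s; // PtrStep::operator() advances by `step` bytes per row
}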
- /*
+/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
#include <pcl/gpu/utils/safe_call.hpp>
#include <cuda_runtime_api.h>
+
#include <cassert>
#define HAVE_CUDA
#if !defined(HAVE_CUDA)
-void throw_nogpu() { throw "PCL 2.0 exception"; }
+void
+throw_nogpu()
+{
+ throw "PCL 2.0 exception";
+}
pcl::gpu::DeviceMemory::DeviceMemory() { throw_nogpu(); }
-pcl::gpu::DeviceMemory::DeviceMemory(void *, std::size_t) { throw_nogpu(); }
+
+pcl::gpu::DeviceMemory::DeviceMemory(void*, std::size_t) { throw_nogpu(); }
+
pcl::gpu::DeviceMemory::DeviceMemory(std::size_t) { throw_nogpu(); }
+
pcl::gpu::DeviceMemory::~DeviceMemory() { throw_nogpu(); }
-pcl::gpu::DeviceMemory::DeviceMemory(const DeviceMemory& ) { throw_nogpu(); }
-pcl::gpu::DeviceMemory& pcl::gpu::DeviceMemory::operator=(const pcl::gpu::DeviceMemory&) { throw_nogpu(); return *this;}
+
+pcl::gpu::DeviceMemory::DeviceMemory(const DeviceMemory&) { throw_nogpu(); }
+
+pcl::gpu::DeviceMemory&
+pcl::gpu::DeviceMemory::operator=(const pcl::gpu::DeviceMemory&)
+{
+ throw_nogpu();
+ return *this;
+}
+
void pcl::gpu::DeviceMemory::create(std::size_t) { throw_nogpu(); }
-void pcl::gpu::DeviceMemory::release() { throw_nogpu(); }
-void pcl::gpu::DeviceMemory::copyTo(DeviceMemory&) const { throw_nogpu(); }
-void pcl::gpu::DeviceMemory::upload(const void*, std::size_t) { throw_nogpu(); }
-void pcl::gpu::DeviceMemory::download(void*) const { throw_nogpu(); }
-bool pcl::gpu::DeviceMemory::empty() const { throw_nogpu(); }
+
+void
+pcl::gpu::DeviceMemory::release()
+{
+ throw_nogpu();
+}
+
+void
+pcl::gpu::DeviceMemory::copyTo(DeviceMemory&) const
+{
+ throw_nogpu();
+}
+
+void
+pcl::gpu::DeviceMemory::upload(const void*, std::size_t)
+{
+ throw_nogpu();
+}
+
+void
+pcl::gpu::DeviceMemory::download(void*) const
+{
+ throw_nogpu();
+}
+
+bool
+pcl::gpu::DeviceMemory::empty() const
+{
+ throw_nogpu();
+}
+
pcl::gpu::DeviceMemory2D::DeviceMemory2D() { throw_nogpu(); }
-pcl::gpu::DeviceMemory2D::DeviceMemory2D(int, int) { throw_nogpu(); }
-pcl::gpu::DeviceMemory2D::DeviceMemory2D(int, int, void*, std::size_t) { throw_nogpu(); }
+
+pcl::gpu::DeviceMemory2D::DeviceMemory2D(int, int) { throw_nogpu(); }
+
+pcl::gpu::DeviceMemory2D::DeviceMemory2D(int, int, void*, std::size_t)
+{
+ throw_nogpu();
+}
+
pcl::gpu::DeviceMemory2D::~DeviceMemory2D() { throw_nogpu(); }
-pcl::gpu::DeviceMemory2D::DeviceMemory2D(const DeviceMemory2D&) { throw_nogpu(); }
-pcl::gpu::DeviceMemory2D& pcl::gpu::DeviceMemory2D::operator=(const pcl::gpu::DeviceMemory2D&) { throw_nogpu(); return *this;}
-void pcl::gpu::DeviceMemory2D::create(int, int ) { throw_nogpu(); }
-void pcl::gpu::DeviceMemory2D::release() { throw_nogpu(); }
-void pcl::gpu::DeviceMemory2D::copyTo(DeviceMemory2D&) const { throw_nogpu(); }
-void pcl::gpu::DeviceMemory2D::upload(const void *, std::size_t, int, int ) { throw_nogpu(); }
-void pcl::gpu::DeviceMemory2D::download(void *, std::size_t ) const { throw_nogpu(); }
-bool pcl::gpu::DeviceMemory2D::empty() const { throw_nogpu(); }
+
+pcl::gpu::DeviceMemory2D::DeviceMemory2D(const DeviceMemory2D&) { throw_nogpu(); }
+
+pcl::gpu::DeviceMemory2D&
+pcl::gpu::DeviceMemory2D::operator=(const pcl::gpu::DeviceMemory2D&)
+{
+ throw_nogpu();
+ return *this;
+}
+
+void
+pcl::gpu::DeviceMemory2D::create(int, int)
+{
+ throw_nogpu();
+}
+
+void
+pcl::gpu::DeviceMemory2D::release()
+{
+ throw_nogpu();
+}
+
+void
+pcl::gpu::DeviceMemory2D::copyTo(DeviceMemory2D&) const
+{
+ throw_nogpu();
+}
+
+void
+pcl::gpu::DeviceMemory2D::upload(const void*, std::size_t, int, int)
+{
+ throw_nogpu();
+}
+
+void
+pcl::gpu::DeviceMemory2D::download(void*, std::size_t) const
+{
+ throw_nogpu();
+}
+
+bool
+pcl::gpu::DeviceMemory2D::empty() const
+{
+ throw_nogpu();
+}
#else
////////////////////////// XADD ///////////////////////////////
#ifdef __GNUC__
- #if !defined WIN32 && (defined __i486__ || defined __i586__ || defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__)
- #define CV_XADD __sync_fetch_and_add
- #else
- #include <ext/atomicity.h>
- #define CV_XADD __gnu_cxx::__exchange_and_add
- #endif
+#if !defined WIN32 && (defined __i486__ || defined __i586__ || defined __i686__ || \
+ defined __MMX__ || defined __SSE__ || defined __ppc__)
+#define CV_XADD __sync_fetch_and_add
+#else
+#include <ext/atomicity.h>
+#define CV_XADD __gnu_cxx::__exchange_and_add
+#endif
#elif defined WIN32 || defined _WIN32
- #include <intrin.h>
- #define CV_XADD(addr,delta) _InterlockedExchangeAdd((long volatile*)(addr), (delta))
+#include <intrin.h>
+#define CV_XADD(addr, delta) _InterlockedExchangeAdd((long volatile*)(addr), (delta))
#else
- template<typename _Tp> static inline _Tp CV_XADD(_Tp* addr, _Tp delta)
- { int tmp = *addr; *addr += delta; return tmp; }
-
+template <typename _Tp>
+static inline _Tp
+CV_XADD(_Tp* addr, _Tp delta)
+{
+ int tmp = *addr;
+ *addr += delta;
+ return tmp;
+}
+
#endif
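In short, CV_XADD(addr, delta) atomically adds delta to *addr and returns the previous value; the release() implementations below rely on that previous value being 1 to detect the last owner. A tiny sketch with hypothetical values:

int rc = 1;                  // a freshly created reference counter
int prev = CV_XADD(&rc, -1); // prev == 1, rc == 0: last owner, free the buffer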
//////////////////////// DeviceArray /////////////////////////////
-
-pcl::gpu::DeviceMemory::DeviceMemory() : data_(nullptr), sizeBytes_(0), refcount_(nullptr) {}
-pcl::gpu::DeviceMemory::DeviceMemory(void *ptr_arg, std::size_t sizeBytes_arg) : data_(ptr_arg), sizeBytes_(sizeBytes_arg), refcount_(nullptr){}
-pcl::gpu::DeviceMemory::DeviceMemory(std::size_t sizeBtes_arg) : data_(nullptr), sizeBytes_(0), refcount_(nullptr) { create(sizeBtes_arg); }
+
+pcl::gpu::DeviceMemory::DeviceMemory()
+: data_(nullptr), sizeBytes_(0), refcount_(nullptr)
+{}
+
+pcl::gpu::DeviceMemory::DeviceMemory(void* ptr_arg, std::size_t sizeBytes_arg)
+: data_(ptr_arg), sizeBytes_(sizeBytes_arg), refcount_(nullptr)
+{}
+
+pcl::gpu::DeviceMemory::DeviceMemory(std::size_t sizeBytes_arg)
+: data_(nullptr), sizeBytes_(0), refcount_(nullptr)
+{
+ create(sizeBytes_arg);
+}
+
pcl::gpu::DeviceMemory::~DeviceMemory() { release(); }
-pcl::gpu::DeviceMemory::DeviceMemory(const DeviceMemory& other_arg)
- : data_(other_arg.data_), sizeBytes_(other_arg.sizeBytes_), refcount_(other_arg.refcount_)
+pcl::gpu::DeviceMemory::DeviceMemory(const DeviceMemory& other_arg)
+: data_(other_arg.data_)
+, sizeBytes_(other_arg.sizeBytes_)
+, refcount_(other_arg.refcount_)
{
- if( refcount_ )
- CV_XADD(refcount_, 1);
+ if (refcount_)
+ CV_XADD(refcount_, 1);
}
-pcl::gpu::DeviceMemory& pcl::gpu::DeviceMemory::operator = (const pcl::gpu::DeviceMemory& other_arg)
+pcl::gpu::DeviceMemory&
+pcl::gpu::DeviceMemory::operator=(const pcl::gpu::DeviceMemory& other_arg)
{
- if( this != &other_arg )
- {
- if( other_arg.refcount_ )
- CV_XADD(other_arg.refcount_, 1);
- release();
-
- data_ = other_arg.data_;
- sizeBytes_ = other_arg.sizeBytes_;
- refcount_ = other_arg.refcount_;
- }
- return *this;
+ if (this != &other_arg) {
+ if (other_arg.refcount_)
+ CV_XADD(other_arg.refcount_, 1);
+ release();
+
+ data_ = other_arg.data_;
+ sizeBytes_ = other_arg.sizeBytes_;
+ refcount_ = other_arg.refcount_;
+ }
+ return *this;
}
-void pcl::gpu::DeviceMemory::create(std::size_t sizeBytes_arg)
+void
+pcl::gpu::DeviceMemory::create(std::size_t sizeBytes_arg)
{
- if (sizeBytes_arg == sizeBytes_)
- return;
-
- if( sizeBytes_arg > 0)
- {
- if( data_ )
- release();
+ if (sizeBytes_arg == sizeBytes_)
+ return;
+
+ if (sizeBytes_arg > 0) {
+ if (data_)
+ release();
+
+ sizeBytes_ = sizeBytes_arg;
- sizeBytes_ = sizeBytes_arg;
-
- cudaSafeCall( cudaMalloc(&data_, sizeBytes_) );
-
- //refcount_ = (int*)cv::fastMalloc(sizeof(*refcount_));
- refcount_ = new int;
- *refcount_ = 1;
- }
+ cudaSafeCall(cudaMalloc(&data_, sizeBytes_));
+
+ // refcount_ = (int*)cv::fastMalloc(sizeof(*refcount_));
+ refcount_ = new int;
+ *refcount_ = 1;
+ }
}
-void pcl::gpu::DeviceMemory::copyTo(DeviceMemory& other) const
+void
+pcl::gpu::DeviceMemory::copyTo(DeviceMemory& other) const
{
- if (empty())
- other.release();
- else
- {
- other.create(sizeBytes_);
- cudaSafeCall( cudaMemcpy(other.data_, data_, sizeBytes_, cudaMemcpyDeviceToDevice) );
- cudaSafeCall( cudaDeviceSynchronize() );
- }
+ if (empty())
+ other.release();
+ else {
+ other.create(sizeBytes_);
+ cudaSafeCall(cudaMemcpy(other.data_, data_, sizeBytes_, cudaMemcpyDeviceToDevice));
+ cudaSafeCall(cudaDeviceSynchronize());
+ }
}
-void pcl::gpu::DeviceMemory::release()
+void
+pcl::gpu::DeviceMemory::release()
{
- if( refcount_ && CV_XADD(refcount_, -1) == 1 )
- {
- //cv::fastFree(refcount);
- delete refcount_;
- cudaSafeCall( cudaFree(data_) );
- }
- data_ = nullptr;
- sizeBytes_ = 0;
- refcount_ = nullptr;
+ if (refcount_ && CV_XADD(refcount_, -1) == 1) {
+ // cv::fastFree(refcount);
+ delete refcount_;
+ cudaSafeCall(cudaFree(data_));
+ }
+ data_ = nullptr;
+ sizeBytes_ = 0;
+ refcount_ = nullptr;
}
-void pcl::gpu::DeviceMemory::upload(const void *host_ptr_arg, std::size_t sizeBytes_arg)
+void
+pcl::gpu::DeviceMemory::upload(const void* host_ptr_arg, std::size_t sizeBytes_arg)
{
- create(sizeBytes_arg);
- cudaSafeCall( cudaMemcpy(data_, host_ptr_arg, sizeBytes_, cudaMemcpyHostToDevice) );
- cudaSafeCall( cudaDeviceSynchronize() );
+ create(sizeBytes_arg);
+ cudaSafeCall(cudaMemcpy(data_, host_ptr_arg, sizeBytes_, cudaMemcpyHostToDevice));
+ cudaSafeCall(cudaDeviceSynchronize());
}
-void pcl::gpu::DeviceMemory::download(void *host_ptr_arg) const
-{
- cudaSafeCall( cudaMemcpy(host_ptr_arg, data_, sizeBytes_, cudaMemcpyDeviceToHost) );
- cudaSafeCall( cudaDeviceSynchronize() );
-}
+bool
+pcl::gpu::DeviceMemory::upload(const void* host_ptr_arg,
+ std::size_t device_begin_byte_offset,
+ std::size_t num_bytes)
+{
+ if (device_begin_byte_offset + num_bytes > sizeBytes_) {
+ return false;
+ }
+ void* begin = static_cast<char*>(data_) + device_begin_byte_offset;
+ cudaSafeCall(cudaMemcpy(begin, host_ptr_arg, num_bytes, cudaMemcpyHostToDevice));
+ cudaSafeCall(cudaDeviceSynchronize());
+ return true;
+}
-void pcl::gpu::DeviceMemory::swap(DeviceMemory& other_arg)
+void
+pcl::gpu::DeviceMemory::download(void* host_ptr_arg) const
{
- std::swap(data_, other_arg.data_);
- std::swap(sizeBytes_, other_arg.sizeBytes_);
- std::swap(refcount_, other_arg.refcount_);
+ cudaSafeCall(cudaMemcpy(host_ptr_arg, data_, sizeBytes_, cudaMemcpyDeviceToHost));
+ cudaSafeCall(cudaDeviceSynchronize());
}
-bool pcl::gpu::DeviceMemory::empty() const { return !data_; }
-size_t pcl::gpu::DeviceMemory::sizeBytes() const { return sizeBytes_; }
+bool
+pcl::gpu::DeviceMemory::download(void* host_ptr_arg,
+ std::size_t device_begin_byte_offset,
+ std::size_t num_bytes) const
+{
+ if (device_begin_byte_offset + num_bytes > sizeBytes_) {
+ return false;
+ }
+ const void* begin = static_cast<char*>(data_) + device_begin_byte_offset;
+ cudaSafeCall(cudaMemcpy(host_ptr_arg, begin, num_bytes, cudaMemcpyDeviceToHost));
+ cudaSafeCall(cudaDeviceSynchronize());
+ return true;
+}
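The offset-based upload()/download() overloads implemented above never allocate; a minimal usage sketch, assuming the buffer has already been created large enough:

void
partialCopyExample(pcl::gpu::DeviceMemory& buf)
{
  float chunk[16] = {};
  const std::size_t offset = 64; // byte offset into the existing device buffer
  // both calls return false instead of allocating when the range is out of bounds
  if (buf.upload(chunk, offset, sizeof(chunk)))
    buf.download(chunk, offset, sizeof(chunk));
}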
+void
+pcl::gpu::DeviceMemory::swap(DeviceMemory& other_arg)
+{
+ std::swap(data_, other_arg.data_);
+ std::swap(sizeBytes_, other_arg.sizeBytes_);
+ std::swap(refcount_, other_arg.refcount_);
+}
+
+bool
+pcl::gpu::DeviceMemory::empty() const
+{
+ return !data_;
+}
+size_t
+pcl::gpu::DeviceMemory::sizeBytes() const
+{
+ return sizeBytes_;
+}
//////////////////////// DeviceArray2D /////////////////////////////
-pcl::gpu::DeviceMemory2D::DeviceMemory2D() : data_(nullptr), step_(0), colsBytes_(0), rows_(0), refcount_(nullptr) {}
+pcl::gpu::DeviceMemory2D::DeviceMemory2D()
+: data_(nullptr), step_(0), colsBytes_(0), rows_(0), refcount_(nullptr)
+{}
-pcl::gpu::DeviceMemory2D::DeviceMemory2D(int rows_arg, int colsBytes_arg)
- : data_(nullptr), step_(0), colsBytes_(0), rows_(0), refcount_(nullptr)
-{
- create(rows_arg, colsBytes_arg);
+pcl::gpu::DeviceMemory2D::DeviceMemory2D(int rows_arg, int colsBytes_arg)
+: data_(nullptr), step_(0), colsBytes_(0), rows_(0), refcount_(nullptr)
+{
+ create(rows_arg, colsBytes_arg);
}
-pcl::gpu::DeviceMemory2D::DeviceMemory2D(int rows_arg, int colsBytes_arg, void *data_arg, std::size_t step_arg)
- : data_(data_arg), step_(step_arg), colsBytes_(colsBytes_arg), rows_(rows_arg), refcount_(nullptr) {}
+pcl::gpu::DeviceMemory2D::DeviceMemory2D(int rows_arg,
+ int colsBytes_arg,
+ void* data_arg,
+ std::size_t step_arg)
+: data_(data_arg)
+, step_(step_arg)
+, colsBytes_(colsBytes_arg)
+, rows_(rows_arg)
+, refcount_(nullptr)
+{}
pcl::gpu::DeviceMemory2D::~DeviceMemory2D() { release(); }
-
-pcl::gpu::DeviceMemory2D::DeviceMemory2D(const DeviceMemory2D& other_arg) :
- data_(other_arg.data_), step_(other_arg.step_), colsBytes_(other_arg.colsBytes_), rows_(other_arg.rows_), refcount_(other_arg.refcount_)
+pcl::gpu::DeviceMemory2D::DeviceMemory2D(const DeviceMemory2D& other_arg)
+: data_(other_arg.data_)
+, step_(other_arg.step_)
+, colsBytes_(other_arg.colsBytes_)
+, rows_(other_arg.rows_)
+, refcount_(other_arg.refcount_)
{
- if( refcount_ )
- CV_XADD(refcount_, 1);
+ if (refcount_)
+ CV_XADD(refcount_, 1);
}
-pcl::gpu::DeviceMemory2D& pcl::gpu::DeviceMemory2D::operator = (const pcl::gpu::DeviceMemory2D& other_arg)
+pcl::gpu::DeviceMemory2D&
+pcl::gpu::DeviceMemory2D::operator=(const pcl::gpu::DeviceMemory2D& other_arg)
{
- if( this != &other_arg )
- {
- if( other_arg.refcount_ )
- CV_XADD(other_arg.refcount_, 1);
- release();
-
- colsBytes_ = other_arg.colsBytes_;
- rows_ = other_arg.rows_;
- data_ = other_arg.data_;
- step_ = other_arg.step_;
-
- refcount_ = other_arg.refcount_;
- }
- return *this;
+ if (this != &other_arg) {
+ if (other_arg.refcount_)
+ CV_XADD(other_arg.refcount_, 1);
+ release();
+
+ colsBytes_ = other_arg.colsBytes_;
+ rows_ = other_arg.rows_;
+ data_ = other_arg.data_;
+ step_ = other_arg.step_;
+
+ refcount_ = other_arg.refcount_;
+ }
+ return *this;
}
-void pcl::gpu::DeviceMemory2D::create(int rows_arg, int colsBytes_arg)
+void
+pcl::gpu::DeviceMemory2D::create(int rows_arg, int colsBytes_arg)
{
- if (colsBytes_ == colsBytes_arg && rows_ == rows_arg)
- return;
-
- if( rows_arg > 0 && colsBytes_arg > 0)
- {
- if( data_ )
- release();
-
- colsBytes_ = colsBytes_arg;
- rows_ = rows_arg;
-
- cudaSafeCall( cudaMallocPitch( (void**)&data_, &step_, colsBytes_, rows_) );
+ if (colsBytes_ == colsBytes_arg && rows_ == rows_arg)
+ return;
+
+ if (rows_arg > 0 && colsBytes_arg > 0) {
+ if (data_)
+ release();
+
+ colsBytes_ = colsBytes_arg;
+ rows_ = rows_arg;
+
+ cudaSafeCall(cudaMallocPitch((void**)&data_, &step_, colsBytes_, rows_));
+
+ // refcount = (int*)cv::fastMalloc(sizeof(*refcount));
+ refcount_ = new int;
+ *refcount_ = 1;
+ }
+}
- //refcount = (int*)cv::fastMalloc(sizeof(*refcount));
- refcount_ = new int;
- *refcount_ = 1;
- }
+void
+pcl::gpu::DeviceMemory2D::release()
+{
+ if (refcount_ && CV_XADD(refcount_, -1) == 1) {
+ // cv::fastFree(refcount);
+ delete refcount_;
+ cudaSafeCall(cudaFree(data_));
+ }
+
+ colsBytes_ = 0;
+ rows_ = 0;
+ data_ = nullptr;
+ step_ = 0;
+ refcount_ = nullptr;
}
-void pcl::gpu::DeviceMemory2D::release()
+void
+pcl::gpu::DeviceMemory2D::copyTo(DeviceMemory2D& other) const
{
- if( refcount_ && CV_XADD(refcount_, -1) == 1 )
- {
- //cv::fastFree(refcount);
- delete refcount_;
- cudaSafeCall( cudaFree(data_) );
- }
+ if (empty())
+ other.release();
+ else {
+ other.create(rows_, colsBytes_);
+ cudaSafeCall(cudaMemcpy2D(other.data_,
+ other.step_,
+ data_,
+ step_,
+ colsBytes_,
+ rows_,
+ cudaMemcpyDeviceToDevice));
+ cudaSafeCall(cudaDeviceSynchronize());
+ }
+}
- colsBytes_ = 0;
- rows_ = 0;
- data_ = nullptr;
- step_ = 0;
- refcount_ = nullptr;
+void
+pcl::gpu::DeviceMemory2D::upload(const void* host_ptr_arg,
+ std::size_t host_step_arg,
+ int rows_arg,
+ int colsBytes_arg)
+{
+ create(rows_arg, colsBytes_arg);
+ cudaSafeCall(cudaMemcpy2D(data_,
+ step_,
+ host_ptr_arg,
+ host_step_arg,
+ colsBytes_,
+ rows_,
+ cudaMemcpyHostToDevice));
+ cudaSafeCall(cudaDeviceSynchronize());
}
-void pcl::gpu::DeviceMemory2D::copyTo(DeviceMemory2D& other) const
+void
+pcl::gpu::DeviceMemory2D::download(void* host_ptr_arg, std::size_t host_step_arg) const
{
- if (empty())
- other.release();
- else
- {
- other.create(rows_, colsBytes_);
- cudaSafeCall( cudaMemcpy2D(other.data_, other.step_, data_, step_, colsBytes_, rows_, cudaMemcpyDeviceToDevice) );
- cudaSafeCall( cudaDeviceSynchronize() );
- }
+ cudaSafeCall(cudaMemcpy2D(host_ptr_arg,
+ host_step_arg,
+ data_,
+ step_,
+ colsBytes_,
+ rows_,
+ cudaMemcpyDeviceToHost));
+ cudaSafeCall(cudaDeviceSynchronize());
}
-void pcl::gpu::DeviceMemory2D::upload(const void *host_ptr_arg, std::size_t host_step_arg, int rows_arg, int colsBytes_arg)
+void
+pcl::gpu::DeviceMemory2D::swap(DeviceMemory2D& other_arg)
{
- create(rows_arg, colsBytes_arg);
- cudaSafeCall( cudaMemcpy2D(data_, step_, host_ptr_arg, host_step_arg, colsBytes_, rows_, cudaMemcpyHostToDevice) );
- cudaSafeCall( cudaDeviceSynchronize() );
+ std::swap(data_, other_arg.data_);
+ std::swap(step_, other_arg.step_);
+
+ std::swap(colsBytes_, other_arg.colsBytes_);
+ std::swap(rows_, other_arg.rows_);
+ std::swap(refcount_, other_arg.refcount_);
}
-void pcl::gpu::DeviceMemory2D::download(void *host_ptr_arg, std::size_t host_step_arg) const
-{
- cudaSafeCall( cudaMemcpy2D(host_ptr_arg, host_step_arg, data_, step_, colsBytes_, rows_, cudaMemcpyDeviceToHost) );
- cudaSafeCall( cudaDeviceSynchronize() );
-}
+bool
+pcl::gpu::DeviceMemory2D::empty() const
+{
+ return !data_;
+}
-void pcl::gpu::DeviceMemory2D::swap(DeviceMemory2D& other_arg)
-{
- std::swap(data_, other_arg.data_);
- std::swap(step_, other_arg.step_);
+int
+pcl::gpu::DeviceMemory2D::colsBytes() const
+{
+ return colsBytes_;
+}
- std::swap(colsBytes_, other_arg.colsBytes_);
- std::swap(rows_, other_arg.rows_);
- std::swap(refcount_, other_arg.refcount_);
+int
+pcl::gpu::DeviceMemory2D::rows() const
+{
+ return rows_;
}
-bool pcl::gpu::DeviceMemory2D::empty() const { return !data_; }
-int pcl::gpu::DeviceMemory2D::colsBytes() const { return colsBytes_; }
-int pcl::gpu::DeviceMemory2D::rows() const { return rows_; }
-size_t pcl::gpu::DeviceMemory2D::step() const { return step_; }
+size_t
+pcl::gpu::DeviceMemory2D::step() const
+{
+ return step_;
+}
#endif
#include <pcl/gpu/containers/initialization.h>
-#include <iostream>
#include <cstdlib>
+#include <iostream>
-void pcl::gpu::error(const char *error_string, const char *file, const int line, const char *func)
-{
- std::cout << "Error: " << error_string << "\t" << file << ":" << line << std::endl;
- exit(EXIT_FAILURE);
+void
+pcl::gpu::error(const char* error_string,
+ const char* file,
+ const int line,
+ const char* func)
+{
+ std::cout << "Error: " << error_string << "\t" << file << ":" << line << std::endl;
+ exit(EXIT_FAILURE);
}
/*
-* Software License Agreement (BSD License)
-*
-* Copyright (c) 2011, Willow Garage, Inc.
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-* * Neither the name of Willow Garage, Inc. nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*
-* Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
-*/
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
#include <pcl/gpu/containers/initialization.h>
#include <pcl/gpu/utils/safe_call.hpp>
#include "cuda.h"
+
+#include <array> // replace C-style array with std::array
#include <cstdio>
#define HAVE_CUDA
//#include <pcl_config.h>
-
#if !defined(HAVE_CUDA)
-void throw_nogpu() { throw "PCL 2.0 exception"; }
-int pcl::gpu::getCudaEnabledDeviceCount() { return 0; }
-void pcl::gpu::setDevice(int /*device*/) { throw_nogpu(); }
-std::string pcl::gpu::getDeviceName(int /*device*/) { throw_nogpu(); }
-void pcl::gpu::printCudaDeviceInfo(int /*device*/){ throw_nogpu(); }
-void pcl::gpu::printShortCudaDeviceInfo(int /*device*/) { throw_nogpu(); }
+void
+throw_nogpu()
+{
+ throw "PCL 2.0 exception";
+}
-#else
+int
+pcl::gpu::getCudaEnabledDeviceCount()
+{
+ return 0;
+}
+
+void
+pcl::gpu::setDevice(int /*device*/)
+{
+ throw_nogpu();
+}
+std::string
+pcl::gpu::getDeviceName(int /*device*/)
+{
+ throw_nogpu();
+}
-int pcl::gpu::getCudaEnabledDeviceCount()
+void
+pcl::gpu::printCudaDeviceInfo(int /*device*/)
{
- int count;
- cudaError_t error = cudaGetDeviceCount( &count );
+ throw_nogpu();
+}
- if (error == cudaErrorInsufficientDriver)
- return -1;
+void
+pcl::gpu::printShortCudaDeviceInfo(int /*device*/)
+{
+ throw_nogpu();
+}
+
+#else
- if (error == cudaErrorNoDevice)
- return 0;
+int
+pcl::gpu::getCudaEnabledDeviceCount()
+{
+ int count;
+ cudaError_t error = cudaGetDeviceCount(&count);
- cudaSafeCall(error);
- return count;
+ if (error == cudaErrorInsufficientDriver)
+ return -1;
+
+ if (error == cudaErrorNoDevice)
+ return 0;
+
+ cudaSafeCall(error);
+ return count;
}
-void pcl::gpu::setDevice(int device)
+void
+pcl::gpu::setDevice(int device)
{
- cudaSafeCall( cudaSetDevice( device ) );
+ cudaSafeCall(cudaSetDevice(device));
}
-std::string pcl::gpu::getDeviceName(int device)
+std::string
+pcl::gpu::getDeviceName(int device)
{
- cudaDeviceProp prop;
- cudaSafeCall( cudaGetDeviceProperties(&prop, device) );
+ cudaDeviceProp prop;
+ cudaSafeCall(cudaGetDeviceProperties(&prop, device));
- return prop.name;
+ return prop.name;
}
-bool pcl::gpu::checkIfPreFermiGPU(int device)
+bool
+pcl::gpu::checkIfPreFermiGPU(int device)
{
if (device < 0)
- cudaSafeCall( cudaGetDevice(&device) );
+ cudaSafeCall(cudaGetDevice(&device));
cudaDeviceProp prop;
- cudaSafeCall( cudaGetDeviceProperties(&prop, device) );
+ cudaSafeCall(cudaGetDeviceProperties(&prop, device));
return prop.major < 2; // CC == 1.x
}
-namespace
+namespace {
+template <class T>
+inline void
+getCudaAttribute(T* attribute, CUdevice_attribute device_attribute, int device)
{
- template <class T> inline void getCudaAttribute(T *attribute, CUdevice_attribute device_attribute, int device)
- {
- *attribute = T();
- CUresult error = CUDA_SUCCESS;// = cuDeviceGetAttribute( attribute, device_attribute, device );
- if( CUDA_SUCCESS == error )
- return;
-
- printf("Driver API error = %04d\n", error);
- pcl::gpu::error("driver API error", __FILE__, __LINE__);
- }
-
- inline int convertSMVer2Cores(int major, int minor)
- {
- // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
- struct SMtoCores {
- int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
- int Cores;
- };
-
- SMtoCores gpuArchCoresPerSM[] = {
- {0x10, 8}, {0x11, 8}, {0x12, 8}, {0x13, 8}, {0x20, 32}, {0x21, 48}, {0x30, 192},
- {0x35, 192}, {0x50, 128}, {0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {-1, -1}
- };
- int index = 0;
- while (gpuArchCoresPerSM[index].SM != -1)
- {
- if (gpuArchCoresPerSM[index].SM == ((major << 4) + minor) )
- return gpuArchCoresPerSM[index].Cores;
- index++;
- }
- printf("\nCan't determine number of cores. Unknown SM version %d.%d!\n", major, minor);
- return 0;
- }
+ *attribute = T();
+ CUresult error =
+ CUDA_SUCCESS; // = cuDeviceGetAttribute( attribute, device_attribute, device );
+ if (CUDA_SUCCESS == error)
+ return;
+
+ printf("Driver API error = %04d\n", error);
+ pcl::gpu::error("driver API error", __FILE__, __LINE__);
}
-void pcl::gpu::printCudaDeviceInfo(int device)
+inline int
+convertSMVer2Cores(int major, int minor)
{
- int count = getCudaEnabledDeviceCount();
- bool valid = (device >= 0) && (device < count);
-
- int beg = valid ? device : 0;
- int end = valid ? device+1 : count;
-
- printf("*** CUDA Device Query (Runtime API) version (CUDART static linking) *** \n\n");
- printf("Device count: %d\n", count);
-
- int driverVersion = 0, runtimeVersion = 0;
- cudaSafeCall( cudaDriverGetVersion(&driverVersion) );
- cudaSafeCall( cudaRuntimeGetVersion(&runtimeVersion) );
-
- const char *computeMode[] = {
- "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
- "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
- "Prohibited (no host thread can use ::cudaSetDevice() with this device)",
- "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
- "Unknown",
- nullptr
- };
-
- for(int dev = beg; dev < end; ++dev)
- {
- cudaDeviceProp prop;
- cudaSafeCall( cudaGetDeviceProperties(&prop, dev) );
-
- int sm_cores = convertSMVer2Cores(prop.major, prop.minor);
-
- printf("\nDevice %d: \"%s\"\n", dev, prop.name);
- printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
- printf(" CUDA Capability Major/Minor version number: %d.%d\n", prop.major, prop.minor);
- printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)prop.totalGlobalMem/1048576.0f, (unsigned long long) prop.totalGlobalMem);
- printf(" (%2d) Multiprocessors x (%2d) CUDA Cores/MP: %d CUDA Cores\n", prop.multiProcessorCount, sm_cores, sm_cores * prop.multiProcessorCount);
- printf(" GPU Clock Speed: %.2f GHz\n", prop.clockRate * 1e-6f);
-
- // This is not available in the CUDA Runtime API, so we make the necessary calls the driver API to support this for output
- int memoryClock, memBusWidth, L2CacheSize;
- getCudaAttribute<int>( &memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev );
- getCudaAttribute<int>( &memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev );
- getCudaAttribute<int>( &L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev );
-
- printf(" Memory Clock rate: %.2f Mhz\n", memoryClock * 1e-3f);
- printf(" Memory Bus Width: %d-bit\n", memBusWidth);
- if (L2CacheSize)
- printf(" L2 Cache Size: %d bytes\n", L2CacheSize);
-
- printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
- prop.maxTexture1D, prop.maxTexture2D[0], prop.maxTexture2D[1],
- prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
- printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
- prop.maxTexture1DLayered[0], prop.maxTexture1DLayered[1],
- prop.maxTexture2DLayered[0], prop.maxTexture2DLayered[1], prop.maxTexture2DLayered[2]);
- printf(" Total amount of constant memory: %u bytes\n", (int)prop.totalConstMem);
- printf(" Total amount of shared memory per block: %u bytes\n", (int)prop.sharedMemPerBlock);
- printf(" Total number of registers available per block: %d\n", prop.regsPerBlock);
- printf(" Warp size: %d\n", prop.warpSize);
- printf(" Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
- printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
- printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
- printf(" Maximum memory pitch: %u bytes\n", (int)prop.memPitch);
- printf(" Texture alignment: %u bytes\n", (int)prop.textureAlignment);
-
- printf(" Concurrent copy and execution: %s with %d copy engine(s)\n", (prop.deviceOverlap ? "Yes" : "No"), prop.asyncEngineCount);
- printf(" Run time limit on kernels: %s\n", prop.kernelExecTimeoutEnabled ? "Yes" : "No");
- printf(" Integrated GPU sharing Host Memory: %s\n", prop.integrated ? "Yes" : "No");
- printf(" Support host page-locked memory mapping: %s\n", prop.canMapHostMemory ? "Yes" : "No");
-
- printf(" Concurrent kernel execution: %s\n", prop.concurrentKernels ? "Yes" : "No");
- printf(" Alignment requirement for Surfaces: %s\n", prop.surfaceAlignment ? "Yes" : "No");
- printf(" Device has ECC support enabled: %s\n", prop.ECCEnabled ? "Yes" : "No");
- printf(" Device is using TCC driver mode: %s\n", prop.tccDriver ? "Yes" : "No");
- printf(" Device supports Unified Addressing (UVA): %s\n", prop.unifiedAddressing ? "Yes" : "No");
- printf(" Device PCI Bus ID / PCI location ID: %d / %d\n", prop.pciBusID, prop.pciDeviceID );
- printf(" Compute Mode:\n");
- printf(" %s \n", computeMode[prop.computeMode]);
- }
-
- printf("\n");
- printf("deviceQuery, CUDA Driver = CUDART");
- printf(", CUDA Driver Version = %d.%d", driverVersion / 1000, driverVersion % 100);
- printf(", CUDA Runtime Version = %d.%d", runtimeVersion/1000, runtimeVersion%100);
- printf(", NumDevs = %d\n\n", count);
- fflush(stdout);
+  // Defines for GPU Architecture types (using the SM version to determine the # of
+  // cores per SM)
+ struct SMtoCores {
+ int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor
+ // version
+ int Cores;
+ };
+
+ std::array<SMtoCores, 15> gpuArchCoresPerSM = {{{0x30, 192},
+ {0x32, 192},
+ {0x35, 192},
+ {0x37, 192},
+ {0x50, 128},
+ {0x52, 128},
+ {0x53, 128},
+ {0x60, 64},
+ {0x61, 128},
+ {0x62, 128},
+ {0x70, 64},
+ {0x72, 64},
+ {0x75, 64},
+ {0x80, 64},
+ {0x86, 128}}};
+ for (const auto& sm2cores : gpuArchCoresPerSM) {
+ if (sm2cores.SM == ((major << 4) + minor))
+ return sm2cores.Cores;
+ }
+ printf(
+ "\nCan't determine number of cores. Unknown SM version %d.%d!\n", major, minor);
+ return 0;
}
+} // namespace
-void pcl::gpu::printShortCudaDeviceInfo(int device)
+void
+pcl::gpu::printCudaDeviceInfo(int device)
{
- int count = getCudaEnabledDeviceCount();
- bool valid = (device >= 0) && (device < count);
-
- int beg = valid ? device : 0;
- int end = valid ? device+1 : count;
-
- int driverVersion = 0, runtimeVersion = 0;
- cudaSafeCall( cudaDriverGetVersion(&driverVersion) );
- cudaSafeCall( cudaRuntimeGetVersion(&runtimeVersion) );
-
- for(int dev = beg; dev < end; ++dev)
- {
- cudaDeviceProp prop;
- cudaSafeCall( cudaGetDeviceProperties(&prop, dev) );
-
- const char *arch_str = prop.major < 2 ? " (pre-Fermi)" : "";
- printf("[pcl::gpu::printShortCudaDeviceInfo] : Device %d: \"%s\" %.0fMb", dev, prop.name, (float)prop.totalGlobalMem/1048576.0f);
- printf(", sm_%d%d%s, %d cores", prop.major, prop.minor, arch_str, convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
- printf(", Driver/Runtime ver.%d.%d/%d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
- }
- fflush(stdout);
+ int count = getCudaEnabledDeviceCount();
+ bool valid = (device >= 0) && (device < count);
+
+ int beg = valid ? device : 0;
+ int end = valid ? device + 1 : count;
+
+ printf(
+ "*** CUDA Device Query (Runtime API) version (CUDART static linking) *** \n\n");
+ printf("Device count: %d\n", count);
+
+ int driverVersion = 0, runtimeVersion = 0;
+ cudaSafeCall(cudaDriverGetVersion(&driverVersion));
+ cudaSafeCall(cudaRuntimeGetVersion(&runtimeVersion));
+
+ const char* computeMode[] = {
+ "Default (multiple host threads can use ::cudaSetDevice() simultaneously)",
+ "Exclusive (only one host thread in one process can use ::cudaSetDevice())",
+ "Prohibited (no host thread can use ::cudaSetDevice())",
+ "Exclusive Process (many threads in one process can use ::cudaSetDevice())",
+ "Unknown",
+ nullptr};
+
+ for (int dev = beg; dev < end; ++dev) {
+ cudaDeviceProp prop;
+ cudaSafeCall(cudaGetDeviceProperties(&prop, dev));
+
+ int sm_cores = convertSMVer2Cores(prop.major, prop.minor);
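+    // Total CUDA cores = cores per SM x multiprocessor count, using the table above;
+    // e.g. an sm_86 device (0x86 -> 128 cores/SM) with 68 SMs reports 68 x 128 = 8704 cores.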
+
+ printf("\nDevice %d: \"%s\"\n", dev, prop.name);
+ printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
+ driverVersion / 1000,
+ driverVersion % 100,
+ runtimeVersion / 1000,
+ runtimeVersion % 100);
+ printf(" CUDA Capability Major/Minor version number: %d.%d\n",
+ prop.major,
+ prop.minor);
+ printf(
+ " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
+ (float)prop.totalGlobalMem / 1048576.0f,
+ (unsigned long long)prop.totalGlobalMem);
+ printf(" (%2d) Multiprocessors x (%2d) CUDA Cores/MP: %d CUDA Cores\n",
+ prop.multiProcessorCount,
+ sm_cores,
+ sm_cores * prop.multiProcessorCount);
+ printf(" GPU Clock Speed: %.2f GHz\n",
+ prop.clockRate * 1e-6f);
+
+ // This is not available in the CUDA Runtime API, so we make the necessary calls the
+ // driver API to support this for output
+ int memoryClock, memBusWidth, L2CacheSize;
+ getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev);
+ getCudaAttribute<int>(
+ &memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev);
+ getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev);
+
+ printf(" Memory Clock rate: %.2f Mhz\n",
+ memoryClock * 1e-3f);
+ printf(" Memory Bus Width: %d-bit\n", memBusWidth);
+ if (L2CacheSize)
+ printf(" L2 Cache Size: %d bytes\n",
+ L2CacheSize);
+
+ printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), "
+ "3D=(%d,%d,%d)\n",
+ prop.maxTexture1D,
+ prop.maxTexture2D[0],
+ prop.maxTexture2D[1],
+ prop.maxTexture3D[0],
+ prop.maxTexture3D[1],
+ prop.maxTexture3D[2]);
+ printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) "
+ "x %d\n",
+ prop.maxTexture1DLayered[0],
+ prop.maxTexture1DLayered[1],
+ prop.maxTexture2DLayered[0],
+ prop.maxTexture2DLayered[1],
+ prop.maxTexture2DLayered[2]);
+ printf(" Total amount of constant memory: %u bytes\n",
+ (int)prop.totalConstMem);
+ printf(" Total amount of shared memory per block: %u bytes\n",
+ (int)prop.sharedMemPerBlock);
+ printf(" Total number of registers available per block: %d\n", prop.regsPerBlock);
+ printf(" Warp size: %d\n", prop.warpSize);
+ printf(" Maximum number of threads per block: %d\n",
+ prop.maxThreadsPerBlock);
+ printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
+ prop.maxThreadsDim[0],
+ prop.maxThreadsDim[1],
+ prop.maxThreadsDim[2]);
+ printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
+ prop.maxGridSize[0],
+ prop.maxGridSize[1],
+ prop.maxGridSize[2]);
+ printf(" Maximum memory pitch: %u bytes\n",
+ (int)prop.memPitch);
+ printf(" Texture alignment: %u bytes\n",
+ (int)prop.textureAlignment);
+
+ printf(
+ " Concurrent copy and execution: %s with %d copy engine(s)\n",
+ (prop.deviceOverlap ? "Yes" : "No"),
+ prop.asyncEngineCount);
+ printf(" Run time limit on kernels: %s\n",
+ prop.kernelExecTimeoutEnabled ? "Yes" : "No");
+ printf(" Integrated GPU sharing Host Memory: %s\n",
+ prop.integrated ? "Yes" : "No");
+ printf(" Support host page-locked memory mapping: %s\n",
+ prop.canMapHostMemory ? "Yes" : "No");
+
+ printf(" Concurrent kernel execution: %s\n",
+ prop.concurrentKernels ? "Yes" : "No");
+ printf(" Alignment requirement for Surfaces: %s\n",
+ prop.surfaceAlignment ? "Yes" : "No");
+ printf(" Device has ECC support enabled: %s\n",
+ prop.ECCEnabled ? "Yes" : "No");
+ printf(" Device is using TCC driver mode: %s\n",
+ prop.tccDriver ? "Yes" : "No");
+ printf(" Device supports Unified Addressing (UVA): %s\n",
+ prop.unifiedAddressing ? "Yes" : "No");
+ printf(" Device PCI Bus ID / PCI location ID: %d / %d\n",
+ prop.pciBusID,
+ prop.pciDeviceID);
+ printf(" Compute Mode:\n");
+ printf(" %s \n", computeMode[prop.computeMode]);
+ }
+
+ printf("\n");
+ printf("deviceQuery, CUDA Driver = CUDART");
+ printf(", CUDA Driver Version = %d.%d", driverVersion / 1000, driverVersion % 100);
+ printf(", CUDA Runtime Version = %d.%d", runtimeVersion / 1000, runtimeVersion % 100);
+ printf(", NumDevs = %d\n\n", count);
+ fflush(stdout);
+}
+
+void
+pcl::gpu::printShortCudaDeviceInfo(int device)
+{
+ int count = getCudaEnabledDeviceCount();
+ bool valid = (device >= 0) && (device < count);
+
+ int beg = valid ? device : 0;
+ int end = valid ? device + 1 : count;
+
+ int driverVersion = 0, runtimeVersion = 0;
+ cudaSafeCall(cudaDriverGetVersion(&driverVersion));
+ cudaSafeCall(cudaRuntimeGetVersion(&runtimeVersion));
+
+ for (int dev = beg; dev < end; ++dev) {
+ cudaDeviceProp prop;
+ cudaSafeCall(cudaGetDeviceProperties(&prop, dev));
+
+ const char* arch_str = prop.major < 2 ? " (pre-Fermi)" : "";
+ printf("[pcl::gpu::printShortCudaDeviceInfo] : Device %d: \"%s\" %.0fMb",
+ dev,
+ prop.name,
+ (float)prop.totalGlobalMem / 1048576.0f);
+ printf(", sm_%d%d%s, %d cores",
+ prop.major,
+ prop.minor,
+ arch_str,
+ convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
+ printf(", Driver/Runtime ver.%d.%d/%d.%d\n",
+ driverVersion / 1000,
+ driverVersion % 100,
+ runtimeVersion / 1000,
+ runtimeVersion % 100);
+ }
+ fflush(stdout);
}
#endif
{
pcl::PointXYZ p;
p.x = w; p.y = h; p.z = 1;
- cloud.points.push_back(p);
+ cloud.push_back(p);
}
}
for (std::size_t j = 0; j < sizes[i] ; ++j)
{
- cloud_result.points.push_back(cloud[data[j + i * max_answers]]);
+ cloud_result.push_back(cloud[data[j + i * max_answers]]);
std::cout << "INFO: data : " << j << " " << j + i * max_answers << " data " << data[j+ i * max_answers] << std::endl;
}
std::stringstream ss;
printf("CPU Time taken: %.2fs\n", (double)(clock() - tStart)/CLOCKS_PER_SEC);
int j = 0;
- for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it)
+  for (const pcl::PointIndices& cluster : cluster_indices)
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
- for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit)
- cloud_cluster->push_back ((*cloud_filtered)[*pit]); //*
+ for (const auto& index : (cluster.indices))
+ cloud_cluster->push_back ((*cloud_filtered)[index]); //*
cloud_cluster->width = cloud_cluster->size ();
cloud_cluster->height = 1;
cloud_cluster->is_dense = true;
std::cout << "INFO: stopped with the GPU version" << std::endl;
j = 0;
- for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices_gpu.begin (); it != cluster_indices_gpu.end (); ++it)
+ for (const pcl::PointIndices& cluster : cluster_indices_gpu)
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster_gpu (new pcl::PointCloud<pcl::PointXYZ>);
- for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit)
- cloud_cluster_gpu->push_back ((*cloud_filtered)[*pit]); //*
+ for (const auto& index : (cluster.indices))
+ cloud_cluster_gpu->push_back ((*cloud_filtered)[index]); //*
cloud_cluster_gpu->width = cloud_cluster_gpu->size ();
cloud_cluster_gpu->height = 1;
cloud_cluster_gpu->is_dense = true;
float3 row1, row2, row3; // == rotation_mg
AngleAxisf(acos_value, cross_vector_norm, row1, row2, row3);
- float3 traslation;
- //traslation.x = row1.x * -model_reference_point.x + row1.y * -model_reference_point.y + row1.z * -model_reference_point.z;
- traslation.y = row2.x * -model_reference_point.x + row2.y * -model_reference_point.y + row2.z * -model_reference_point.z;
- traslation.z = row3.x * -model_reference_point.x + row3.y * -model_reference_point.y + row3.z * -model_reference_point.z;
+ float3 translation;
+ //translation.x = row1.x * -model_reference_point.x + row1.y * -model_reference_point.y + row1.z * -model_reference_point.z;
+ translation.y = row2.x * -model_reference_point.x + row2.y * -model_reference_point.y + row2.z * -model_reference_point.z;
+ translation.z = row3.x * -model_reference_point.x + row3.y * -model_reference_point.y + row3.z * -model_reference_point.z;
float3 model_point_transformed;// = transform_mg * model_point;
- //model_point_transformed.x = traslation.x + row1.x * model_point.x + row1.y * model_point.y + row1.z * model_point.z;
- model_point_transformed.y = traslation.y + row2.x * model_point.x + row2.y * model_point.y + row2.z * model_point.z;
- model_point_transformed.z = traslation.z + row3.x * model_point.x + row3.y * model_point.y + row3.z * model_point.z;
+ //model_point_transformed.x = translation.x + row1.x * model_point.x + row1.y * model_point.y + row1.z * model_point.z;
+ model_point_transformed.y = translation.y + row2.x * model_point.x + row2.y * model_point.y + row2.z * model_point.z;
+ model_point_transformed.z = translation.z + row3.x * model_point.x + row3.y * model_point.y + row3.z * model_point.z;
float angle = std::atan2 ( -model_point_transformed.z, model_point_transformed.y);
if (computePairFeatures (current_point[warp_idx], current_nomal[warp_idx], p, n, f1, f2, f3, f4))
{
// Normalize the f1, f2, f3 features and push them in the histogram
- h_index = std::floor (bins1 * ((f1 + M_PI) * (1.0f / (2.0f * M_PI))));
+ //Using floorf due to changes to MSVC 16.9. See details here: https://devtalk.blender.org/t/cuda-compile-error-windows-10/17886/4
+ //floorf is without std:: see why here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79700
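+      // (f1 + M_PI) * (1 / (2 * M_PI)) shifts f1 from [-pi, pi] into [0, 1] before scaling by the bin count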
+ h_index = floorf (bins1 * ((f1 + M_PI) * (1.0f / (2.0f * M_PI))));
h_index = min(bins1 - 1, max(0, h_index));
atomicAdd(shist_b1 + h_index, hist_incr);
- h_index = std::floor (bins2 * ((f2 + 1.0f) * 0.5f));
+ h_index = floorf (bins2 * ((f2 + 1.0f) * 0.5f));
h_index = min(bins2 - 1, max (0, h_index));
atomicAdd(shist_b2 + h_index, hist_incr);
- h_index = std::floor (bins3 * ((f3 + 1.0f) * 0.5f));
+ h_index = floorf (bins3 * ((f3 + 1.0f) * 0.5f));
h_index = min(bins3 - 1, max (0, h_index));
atomicAdd(shist_b3 + h_index, hist_incr);
//if (computePairFeatures (pi, ni, pj, nj, f1, f2, f3, f4))
{
// Normalize the f1, f2, f3 features and push them in the histogram
- int find0 = std::floor( NR_SPLIT * ((f1 + PI) * (1.f / (2.f * PI))) );
+ //Using floorf due to changes to MSVC 16.9. See details here: https://devtalk.blender.org/t/cuda-compile-error-windows-10/17886/4
+ //floorf is without std:: see why here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79700
+ int find0 = floorf( NR_SPLIT * ((f1 + PI) * (1.f / (2.f * PI))) );
find0 = min(NR_SPLIT - 1, max(0, find0));
- int find1 = std::floor( NR_SPLIT * ( (f2 + 1.f) * 0.5f ) );
+ int find1 = floorf( NR_SPLIT * ( (f2 + 1.f) * 0.5f ) );
find1 = min(NR_SPLIT - 1, max(0, find1));
- int find2 = std::floor( NR_SPLIT * ( (f3 + 1.f) * 0.5f ) );
+ int find2 = floorf( NR_SPLIT * ( (f3 + 1.f) * 0.5f ) );
find2 = min(NR_SPLIT - 1, max(0, find2));
int h_index = find0 + NR_SPLIT * find1 + NR_SPLIT_2 * find2;
computeRGBPairFeatures_RGBOnly(ci, cj, f5, f6, f7);
// color ratios are in [-1, 1]
- int find4 = std::floor (NR_SPLIT * ((f5 + 1.f) * 0.5f));
+ int find4 = floorf(NR_SPLIT * ((f5 + 1.f) * 0.5f));
find4 = min(NR_SPLIT - 1, max(0, find4));
- int find5 = std::floor (NR_SPLIT * ((f6 + 1.f) * 0.5f));
+ int find5 = floorf(NR_SPLIT * ((f6 + 1.f) * 0.5f));
find5 = min(NR_SPLIT - 1, max(0, find5));
- int find6 = std::floor (NR_SPLIT * ((f7 + 1.f) * 0.5f));
+ int find6 = floorf(NR_SPLIT * ((f7 + 1.f) * 0.5f));
find6 = min(NR_SPLIT - 1, max(0, find6));
// and the colors
// bilinear interpolation
float beta_bin_size = radial ? (PI*0.5f/image_width) : bin_size;
- int beta_bin = std::floor(beta / beta_bin_size) + image_width;
- int alpha_bin = std::floor(alpha / bin_size);
+ //Using floorf due to changes to MSVC 16.9. See details here: https://devtalk.blender.org/t/cuda-compile-error-windows-10/17886/4
+ //floorf is without std:: see why here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79700
+ int beta_bin = floorf(beta / beta_bin_size) + image_width;
+ int alpha_bin = floorf(alpha / bin_size);
//alpha_bin = min(simage_cols, max(0, alpha_bin));
//beta_bin = min(simage_rows, max(0, beta_bin));
if (computePairFeatures(centroid_p, centroid_n, p, n, f1, f2, f3, f4))
{
// Normalize the f1, f2, f3, f4 features and push them in the histogram
- h_index = std::floor (bins1 * ((f1 + M_PI) * (1.f / (2.f * M_PI))));
+ //Using floorf due to changes to MSVC 16.9. See details here: https://devtalk.blender.org/t/cuda-compile-error-windows-10/17886/4
+ //floorf is without std:: see why here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79700
+ h_index = floorf (bins1 * ((f1 + M_PI) * (1.f / (2.f * M_PI))));
h_index = min(bins1 - 1, max(0, h_index));
atomicAdd(shist_b1 + h_index, hist_incr);
- h_index = std::floor (bins2 * ((f2 + 1.f) * 0.5f));
+ h_index = floorf (bins2 * ((f2 + 1.f) * 0.5f));
h_index = min(bins2 - 1, max (0, h_index));
atomicAdd(shist_b2 + h_index, hist_incr);
- h_index = std::floor (bins3 * ((f3 + 1.f) * 0.5f));
+ h_index = floorf (bins3 * ((f3 + 1.f) * 0.5f));
h_index = min(bins3 - 1, max (0, h_index));
atomicAdd(shist_b3 + h_index, hist_incr);
if (normalize_distances)
- h_index = std::floor (bins4 * (f4 * distance_normalization_factor_inv));
+ h_index = floorf (bins4 * (f4 * distance_normalization_factor_inv));
else
h_index = __float2int_rn (f4 * 100);
// viewpoint component
float alfa = ((dot(n, d_vp_p) + 1.f) * 0.5f);
- h_index = std::floor (bins_vp * alfa);
+ h_index = floorf (bins_vp * alfa);
h_index = min(bins_vp - 1, max (0, h_index));
atomicAdd(shist_vp + h_index, hist_incr_vp);
#include <pcl/common/common.h>
#include <pcl/gpu/features/features.hpp>
#include "data_source.hpp"
-#include <pcl/search/pcl_search.h>
using namespace pcl;
using namespace pcl::gpu;
const std::size_t N = 5;
PointCloud<PointXYZ> cloud;
- cloud.points.assign(N, {0.0, 0.0, 0.0});
+ cloud.assign(N, {0.0, 0.0, 0.0});
const float radius_search = 2.0F;
const int max_results = 500;
std::vector<int> volume_host;
volume_.download (volume_host, cols);
- cloud.points.clear ();
- cloud.points.reserve (10000);
+ cloud.clear ();
+ cloud.reserve (10000);
constexpr int DIVISOR = device::DIVISOR; // SHRT_MAX;
xyz.y = point (1);
xyz.z = point (2);
- cloud.points.push_back (xyz);
+ cloud.push_back (xyz);
}
}
dz = 0;
xyz.y = point (1);
xyz.z = point (2);
- cloud.points.push_back (xyz);
+ cloud.push_back (xyz);
}
}
}
xyz.y = point (1);
xyz.z = point (2);
- cloud.points.push_back (xyz);
+ cloud.push_back (xyz);
}
}
} /* if (connected26) */
tsdf[i] = (float)(elem.x)/device::DIVISOR;
weights[i] = (short)(elem.y);
}
-}
\ No newline at end of file
+}
else()
set(DEFAULT TRUE)
set(REASON)
- set(VTK_USE_FILE "${VTK_USE_FILE}" CACHE INTERNAL "VTK_USE_FILE")
- include("${VTK_USE_FILE}")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
endif()
#include <pcl/console/parse.h>
#include <boost/filesystem.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp> // for microsec_clock::local_time
#include <pcl/gpu/kinfu/kinfu.h>
#include <pcl/gpu/kinfu/raycaster.h>
}
/** \brief Obtain the actual color for the input dataset as vtk scalars.
- * \param[out] scalars the output scalars containing the color for the dataset
- * \return true if the operation was successful (the handler is capable and
- * the input cloud was given as a valid pointer), false otherwise
+ * \return Array of scalars if the operation was successful (the handler is capable and
+ * the input cloud was given as a valid pointer), nullptr otherwise
*/
- bool
- getColor (vtkSmartPointer<vtkDataArray> &scalars) const override
+ vtkSmartPointer<vtkDataArray>
+ getColor () const override
{
if (!capable_ || !cloud_)
- return (false);
+ return nullptr;
- if (!scalars)
- scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
+ auto scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
+
scalars->SetNumberOfComponents (3);
vtkIdType nr_points = vtkIdType (cloud_->size ());
colors[idx + 1] = (*rgb_)[cp].g;
colors[idx + 2] = (*rgb_)[cp].b;
}
- return (true);
+ return scalars;
}
private:
#include "../src/internal.h"
-#include <Eigen/Dense>
#include <cmath>
#include <iostream>
#include <pcl/memory.h>
#pragma omp parallel for \
default(none) \
shared(cloud, output)
- for(int i = 0; i < (int) cloud.points.size (); ++i)
+ for(int i = 0; i < (int) cloud.size (); ++i)
{
int x = cloud[i].x;
int y = cloud[i].y;
#define PCL_WORLD_MODEL_IMPL_HPP_
#include <pcl/gpu/kinfu_large_scale/world_model.h>
+#include <pcl/common/transforms.h> // for transformPointCloud
template <typename PointT>
void
// apply filter
condrem.filter (existing_slice);
- if(!existing_slice.points.empty ())
+ if(!existing_slice.empty ())
{
//transform the slice in new cube coordinates
Eigen::Affine3f transformation;
// remove nans from world cloud
world_->is_dense = false;
- std::vector<int> indices;
+ pcl::Indices indices;
pcl::removeNaNFromPointCloud ( *world_, *world_, indices);
PCL_INFO("World contains %zu points after nan removal.\n",
#include <pcl/common/impl/common.hpp>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/filter_indices.h>
-#include <pcl/filters/crop_box.h>
#include <pcl/filters/conditional_removal.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
void cleanWorldFromNans ()
{
world_->is_dense = false;
- std::vector<int> indices;
+ pcl::Indices indices;
pcl::removeNaNFromPointCloud (*world_, *world_, indices);
}
#include <pcl/gpu/kinfu_large_scale/cyclical_buffer.h>
#include <pcl/common/distances.h>
+#include <pcl/common/transforms.h> // for transformPoint, transformPointCloud
#include "internal.h"
std::vector<int> volume_host;
volume_.download (volume_host, cols);
- cloud.points.clear ();
- cloud.points.reserve (10000);
+ cloud.clear ();
+ cloud.reserve (10000);
constexpr int DIVISOR = pcl::device::kinfuLS::DIVISOR; // SHRT_MAX;
xyz.y = point (1);
xyz.z = point (2);
- cloud.points.push_back (xyz);
+ cloud.push_back (xyz);
}
}
dz = 0;
xyz.y = point (1);
xyz.z = point (2);
- cloud.points.push_back (xyz);
+ cloud.push_back (xyz);
}
}
}
xyz.y = point (1);
xyz.z = point (2);
- cloud.points.push_back (xyz);
+ cloud.push_back (xyz);
}
}
} /* if (connected26) */
else()
set(DEFAULT TRUE)
set(REASON)
- set(VTK_USE_FILE "${VTK_USE_FILE}" CACHE INTERNAL "VTK_USE_FILE")
- include("${VTK_USE_FILE}")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
endif()
capable_ = true;
}
- bool
- getColor (vtkSmartPointer<vtkDataArray> &scalars) const override
+ vtkSmartPointer<vtkDataArray>
+ getColor () const override
{
if (!capable_)
- return (false);
+ return nullptr;
- if (!scalars)
- scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
+ auto scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (3);
vtkIdType nr_points = static_cast<vtkIdType>(cloud_->size ());
colors[idx + 1] = (*rgb_)[cp].g;
colors[idx + 2] = (*rgb_)[cp].b;
}
- return (true);
+ return scalars;
}
private:
#include <pcl/console/parse.h>
#include <boost/filesystem.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp> // for microsec_clock::local_time
#include <pcl/gpu/kinfu_large_scale/kinfu.h>
#include <pcl/gpu/kinfu_large_scale/raycaster.h>
#include <pcl/gpu/containers/initialization.h>
#include <pcl/common/time.h>
+#include <pcl/common/transforms.h> // for transformPoint
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/visualization/image_viewer.h>
c = capture_.registerCallback (func2);
}
#else
- PCL_ERROR ("OpenNI2 is disabled in this PCL. Please build PCL with OpenNI2 feature.");
+ PCL_ERROR ("OpenNI2 is disabled in this PCL. Please build PCL with OpenNI2 feature.\n");
#endif
}
else
c = capture_.registerCallback (func2);
}
#else
- PCL_ERROR ("OpenNI is disabled in this PCL. Please build PCL with OpenNI feature.");
+ PCL_ERROR ("OpenNI is disabled in this PCL. Please build PCL with OpenNI feature.\n");
#endif
}
#endif
using ScopeTimeT = pcl::ScopeTime;
-#include <Eigen/Dense>
#include <cmath>
#include <iostream>
#ifdef _WIN32
* \param[in] queries array of centers
* \param[out] result array of results ( one index for each query )
*/
+ PCL_DEPRECATED(1, 14, "use approxNearestSearch() which returns square distances instead")
void approxNearestSearch(const Queries& queries, NeighborIndices& result) const;
+ /** \brief Batch approximate nearest search on GPU
+ * \param[in] queries array of centers
+ * \param[out] result array of results ( one index for each query )
+ * \param[out] sqr_distance corresponding square distances to results from query point
+ */
+ void approxNearestSearch(const Queries& queries, NeighborIndices& result, ResultSqrDists& sqr_distance) const;
+
/** \brief Batch exact k-nearest search on GPU for k == 1 only!
* \param[in] queries array of centers
* \param[in] k number of neighbors (only k == 1 is supported)
*/
void nearestKSearchBatch(const Queries& queries, int k, NeighborIndices& results) const;
+ /** \brief Batch exact k-nearest search on GPU for k == 1 only!
+ * \param[in] queries array of centers
+ * \param[in] k number of neighbors (only k == 1 is supported)
+ * \param[out] results array of results
+ * \param[out] sqr_distances square distances to results
+ */
+ void nearestKSearchBatch(const Queries& queries, int k, NeighborIndices& results, ResultSqrDists& sqr_distances) const;
+
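+  /* Usage sketch (hypothetical caller code; assumes an octree built from a device cloud
+   * and a `queries` array already uploaded to the GPU):
+   *   pcl::gpu::NeighborIndices results;
+   *   pcl::gpu::Octree::ResultSqrDists sqr_dists;
+   *   octree.approxNearestSearch(queries, results, sqr_dists);
+   *   // sqr_dists then holds one squared distance per query (device memory, downloadable to the host)
+   */
+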
/** \brief Destroys octree and releases all resources */
void clear();
private:
* Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
*/
-#include <limits>
-
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
-
-#include "utils/copygen.hpp"
+#include "utils/approx_nearest_utils.hpp"
#include "utils/boxutils.hpp"
+#include "utils/copygen.hpp"
#include "utils/scan_block.hpp"
+#include <assert.h>
+#include <limits>
+#include <tuple>
+
+namespace pcl {
+namespace device {
+namespace appnearest_search {
+using PointType = OctreeImpl::PointType;
+
+struct Batch {
+ const PointType* queries;
+
+ const int* indices;
+ const float* points;
+ int points_step; // elem step
+
+ OctreeGlobalWithBox octree;
+
+ int queries_num;
+ mutable int* output;
+ mutable float* sqr_distance;
+};
+
+struct KernelPolicy {
+ enum {
+ CTA_SIZE = 512,
+
+ LOG_WARP_SIZE = 5,
+ WARP_SIZE = 1 << LOG_WARP_SIZE,
+ WARPS_COUNT = CTA_SIZE / WARP_SIZE,
+ };
+};
+
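+// One query per thread: each lane locates the leaf node containing its query, and the
+// warp then scans the points of each active lane's node cooperatively, reducing the
+// best candidate with shuffle instructions.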
+struct Warp_appNearestSearch {
+public:
+ const Batch& batch;
+
+ int query_index;
+ float3 query;
+ int result_idx;
+ float sqr_dist;
+
+ __device__ __forceinline__
+ Warp_appNearestSearch(const Batch& batch_arg, int query_index_arg)
+ : batch(batch_arg), query_index(query_index_arg)
+ {}
+
+ __device__ __forceinline__ void
+ launch(bool active)
+ {
+ int node_idx = -1;
+ if (active) {
+ PointType q = batch.queries[query_index];
+ query = make_float3(q.x, q.y, q.z);
+
+ const float3& minp = batch.octree.minp;
+ const float3& maxp = batch.octree.maxp;
+
+ node_idx = pcl::device::findNode(minp, maxp, query, batch.octree.nodes);
+ }
+
+ processNode(node_idx);
+
+ if (active)
+ {
+ batch.output[query_index] = batch.indices[result_idx];
+ batch.sqr_distance[query_index] = sqr_dist;
+ }
+ }
+
+private:
+ __device__ __forceinline__ void
+ processNode(const int node_idx)
+ {
+ __shared__ volatile int per_warp_buffer[KernelPolicy::WARPS_COUNT];
+
+ int mask = __ballot_sync(0xFFFFFFFF, node_idx != -1);
+
+ while (mask) {
+ const unsigned int laneId = Warp::laneId();
+
+ const int active_lane = __ffs(mask) - 1; //[0..31]
+ mask &= ~(1 << active_lane);
+
+ // broadcast beg and end
+ int fbeg, fend;
+ if (active_lane == laneId) {
+ fbeg = batch.octree.begs[node_idx];
+ fend = batch.octree.ends[node_idx];
+ }
+ const int beg = __shfl_sync(0xFFFFFFFF, fbeg, active_lane);
+ const int end = __shfl_sync(0xFFFFFFFF, fend, active_lane);
+
+ // broadcast warp_query
+ const float3 active_query =
+ make_float3(__shfl_sync(0xFFFFFFFF, query.x, active_lane),
+ __shfl_sync(0xFFFFFFFF, query.y, active_lane),
+ __shfl_sync(0xFFFFFFFF, query.z, active_lane));
+
+ const auto nearestPoint = NearestWarpKernel<KernelPolicy::CTA_SIZE>(
+ beg, batch.points_step, end - beg, active_query);
+
+ if (active_lane == laneId)
+ {
+ result_idx = beg + nearestPoint.first;
+ sqr_dist = nearestPoint.second;
+ }
+ }
+ }
+
+ template <int CTA_SIZE>
+ __device__ std::pair<int, float>
+ NearestWarpKernel(const int beg,
+ const int field_step,
+ const int length,
+ const float3& active_query)
+
+ {
+ int index = 0;
+ float dist_squared = std::numeric_limits<float>::max();
+
+ // serial step
+ for (int idx = Warp::laneId(); idx < length; idx += Warp::STRIDE) {
+ const float dx = batch.points[beg + idx] - active_query.x;
+ const float dy = batch.points[beg + idx + field_step] - active_query.y;
+ const float dz = batch.points[beg + idx + field_step * 2] - active_query.z;
+
+ const float d2 = dx * dx + dy * dy + dz * dz;
+
+ if (dist_squared > d2) {
+ dist_squared = d2;
+ index = idx;
+ }
+ }
+
+ // find minimum distance among warp threads
+ constexpr unsigned FULL_MASK = 0xFFFFFFFF;
+ static_assert(KernelPolicy::WARP_SIZE <= 8 * sizeof(FULL_MASK),
+ "WARP_SIZE exceeds size of bit_offset.");
+
+ for (unsigned int bit_offset = KernelPolicy::WARP_SIZE / 2; bit_offset > 0;
+ bit_offset /= 2) {
+ const float next = __shfl_down_sync(FULL_MASK, dist_squared, bit_offset);
+ const int next_index = __shfl_down_sync(FULL_MASK, index, bit_offset);
+
+ if (dist_squared > next) {
+ dist_squared = next;
+ index = next_index;
+ }
+ }
+
+ // retrieve index and distance
+ index = __shfl_sync(FULL_MASK, index, 0);
+ dist_squared = __shfl_sync(FULL_MASK, dist_squared, 0);
+
+ return {index, dist_squared};
+ }
+};
+
+__global__ void
+KernelAN(const Batch batch)
+{
+ const int query_index = blockIdx.x * blockDim.x + threadIdx.x;
-namespace pcl { namespace device { namespace appnearest_search
-{
- using PointType = OctreeImpl::PointType;
-
- struct Batch
- {
- const PointType* queries;
-
- const int *indices;
- const float* points;
- int points_step; // elem step
-
- OctreeGlobalWithBox octree;
-
- int queries_num;
- mutable int* output;
- };
-
- struct KernelPolicy
- {
- enum
- {
- CTA_SIZE = 512,
-
- LOG_WARP_SIZE = 5,
- WARP_SIZE = 1 << LOG_WARP_SIZE,
- WARPS_COUNT = CTA_SIZE/WARP_SIZE,
- };
- };
-
- struct Warp_appNearestSearch
- {
- public:
- const Batch& batch;
-
- int query_index;
- float3 query;
- int result_idx;
+ const bool active = query_index < batch.queries_num;
- __device__ __forceinline__ Warp_appNearestSearch(const Batch& batch_arg, int query_index_arg)
- : batch(batch_arg), query_index(query_index_arg){}
+ if (__all_sync(0xFFFFFFFF, active == false))
+ return;
- __device__ __forceinline__ void launch(bool active)
- {
- int node_idx = -1;
- if (active)
- {
- PointType q = batch.queries[query_index];
- query = make_float3(q.x, q.y, q.z);
-
- node_idx = findNode();
- }
+ Warp_appNearestSearch search(batch, query_index);
+ search.launch(active);
+}
- processNode(node_idx);
-
- if (active)
- batch.output[query_index] = batch.indices[result_idx];
- }
-
- private:
+} // namespace appnearest_search
+} // namespace device
+} // namespace pcl
- __device__ __forceinline__ int findNode()
- {
- float3 minp = batch.octree.minp;
- float3 maxp = batch.octree.maxp;
-
- if(query.x < minp.x || query.y < minp.y || query.z < minp.z)
- return 0;
-
- if(query.x > maxp.x || query.y > maxp.y || query.z > maxp.z)
- return 0;
-
- int node_idx = 0;
- int code = CalcMorton(minp, maxp)(query);
- int level = 0;
-
- for(;;)
- {
- int mask_pos = 1 << Morton::extractLevelCode(code, level);
-
- int node = batch.octree.nodes[node_idx];
- int mask = node & 0xFF;
-
- if(__popc(mask) == 0) // leaf
- return node_idx;
-
- if ( (mask & mask_pos) == 0) // no child
- return node_idx;
-
- node_idx = (node >> 8) + __popc(mask & (mask_pos - 1));
- ++level;
- }
- };
-
- __device__ __forceinline__ void processNode(int node_idx)
- {
- __shared__ volatile int per_warp_buffer[KernelPolicy::WARPS_COUNT];
-
- int mask = __ballot_sync(0xFFFFFFFF, node_idx != -1);
-
- while(mask)
- {
- unsigned int laneId = Warp::laneId();
- unsigned int warpId = threadIdx.x/warpSize;
-
- int active_lane = __ffs(mask) - 1; //[0..31]
- mask &= ~(1 << active_lane);
-
- volatile int* warp_buffer = &per_warp_buffer[warpId];
-
- //broadcast beg
- if (active_lane == laneId)
- *warp_buffer = batch.octree.begs[node_idx];
- int beg = *warp_buffer;
-
- //broadcast end
- if (active_lane == laneId)
- *warp_buffer = batch.octree.ends[node_idx];
- int end = *warp_buffer;
-
- float3 active_query;
- volatile float* warp_buffer_float = (float*)&per_warp_buffer[warpId];
-
- //broadcast warp_query
- if (active_lane == laneId)
- *warp_buffer_float = query.x;
- active_query.x = *warp_buffer_float;
-
- if (active_lane == laneId)
- *warp_buffer_float = query.y;
- active_query.y = *warp_buffer_float;
-
- if (active_lane == laneId)
- *warp_buffer_float = query.z;
- active_query.z = *warp_buffer_float;
-
- int offset = NearestWarpKernel<KernelPolicy::CTA_SIZE>(batch.points + beg, batch.points_step, end - beg, active_query);
-
- if (active_lane == laneId)
- result_idx = beg + offset;
- }
- }
-
- template<int CTA_SIZE>
- __device__ __forceinline__ int NearestWarpKernel(const float* points, int points_step, int length, const float3& active_query)
- {
- __shared__ volatile float dist2[CTA_SIZE];
- __shared__ volatile int index[CTA_SIZE];
-
- int tid = threadIdx.x;
- dist2[tid] = std::numeric_limits<float>::max();
-
- //serial step
- for (int idx = Warp::laneId(); idx < length; idx += Warp::STRIDE)
- {
- float dx = points[idx ] - active_query.x;
- float dy = points[idx + points_step ] - active_query.y;
- float dz = points[idx + points_step * 2] - active_query.z;
-
- float d2 = dx * dx + dy * dy + dz * dz;
-
- if (dist2[tid] > d2)
- {
- dist2[tid] = d2;
- index[tid] = idx;
- }
- }
- //parallel step
- unsigned int lane = Warp::laneId();
-
- float mind2 = dist2[tid];
-
- if (lane < 16)
- {
- float next = dist2[tid + 16];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 16];
- }
- }
-
- if (lane < 8)
- {
- float next = dist2[tid + 8];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 8];
- }
- }
-
- if (lane < 4)
- {
- float next = dist2[tid + 4];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 4];
- }
- }
-
- if (lane < 2)
- {
- float next = dist2[tid + 2];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 2];
- }
- }
-
- if (lane < 1)
- {
- float next = dist2[tid + 1];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 1];
- }
- }
-
- return index[tid - lane];
- }
- };
-
- __global__ void KernelAN(const Batch batch)
- {
- int query_index = blockIdx.x * blockDim.x + threadIdx.x;
-
- bool active = query_index < batch.queries_num;
-
- if (__all_sync(0xFFFFFFFF, active == false))
- return;
-
- Warp_appNearestSearch search(batch, query_index);
- search.launch(active);
- }
-
-} } }
-
-
-void pcl::device::OctreeImpl::approxNearestSearch(const Queries& queries, NeighborIndices& results) const
+void
+pcl::device::OctreeImpl::approxNearestSearch(const Queries& queries,
+ NeighborIndices& results,
+ BatchResultSqrDists& sqr_distance) const
{
- using BatchType = pcl::device::appnearest_search::Batch;
+ using BatchType = pcl::device::appnearest_search::Batch;
- BatchType batch;
- batch.indices = indices;
- batch.octree = octreeGlobal;
+ BatchType batch;
+ batch.indices = indices;
+ batch.octree = octreeGlobal;
- batch.queries_num = (int)queries.size();
- batch.output = results.data;
+ batch.queries_num = (int)queries.size();
+ batch.output = results.data;
+ batch.sqr_distance = sqr_distance;
- batch.points = points_sorted;
- batch.points_step = (int)points_sorted.elem_step();
- batch.queries = queries;
+ batch.points = points_sorted;
+ batch.points_step = (int)points_sorted.elem_step();
+ batch.queries = queries;
- int block = pcl::device::appnearest_search::KernelPolicy::CTA_SIZE;
- int grid = (batch.queries_num + block - 1) / block;
+ int block = pcl::device::appnearest_search::KernelPolicy::CTA_SIZE;
+ int grid = (batch.queries_num + block - 1) / block;
- cudaSafeCall( cudaFuncSetCacheConfig(pcl::device::appnearest_search::KernelAN, cudaFuncCachePreferL1) );
+ cudaSafeCall(cudaFuncSetCacheConfig(pcl::device::appnearest_search::KernelAN,
+ cudaFuncCachePreferL1));
- pcl::device::appnearest_search::KernelAN<<<grid, block>>>(batch);
- cudaSafeCall( cudaGetLastError() );
- cudaSafeCall( cudaDeviceSynchronize() );
+ pcl::device::appnearest_search::KernelAN<<<grid, block>>>(batch);
+ cudaSafeCall(cudaGetLastError());
+ cudaSafeCall(cudaDeviceSynchronize());
}
\ No newline at end of file
int queries_num;
mutable int* output;
mutable int* sizes;
+ mutable float* sqr_distances;
};
struct KernelPolicy
float3 query;
float min_distance;
+ float min_distance_squared;
int min_idx;
OctreeIterator iterator;
__device__ __forceinline__ Warp_knnSearch(const Batch& batch_arg, const int query_index_arg)
- : batch(batch_arg), query_index(query_index_arg), min_distance(std::numeric_limits<float>::max()), min_idx(0), iterator(batch.octree) { }
+ : batch(batch_arg), query_index(query_index_arg), min_distance(std::numeric_limits<float>::max()), min_distance_squared(std::numeric_limits<float>::max()), min_idx(0), iterator(batch.octree) { }
__device__ __forceinline__ void launch(bool active)
{
}
if (query_index != -1)
{
- batch.output[query_index] = batch.indices[min_idx];
+ batch.output[query_index] = batch.indices[min_idx];
+ batch.sqr_distances[query_index] = min_distance_squared;
if (batch.sizes)
batch.sizes[query_index] = 1;
const auto nearestPoint = NearestWarpKernel<KernelPolicy::CTA_SIZE>(beg, batch.points_step, end - beg, active_query);
if (active_lane == laneId)
- if (min_distance > nearestPoint.second)
+ if (min_distance_squared > nearestPoint.second)
{
- min_distance = nearestPoint.second;
+ min_distance_squared = nearestPoint.second;
min_idx = beg + nearestPoint.first;
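+      // NearestWarpKernel now reports a squared distance; keep min_distance in
+      // Euclidean units by taking the square root here.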
+ min_distance = sqrt(nearestPoint.second);
}
}
}
{
int index = 0;
- float dist2 = std::numeric_limits<float>::max();
+ float dist_squared = std::numeric_limits<float>::max();
// serial step
for (int idx = Warp::laneId(); idx < length; idx += Warp::STRIDE) {
const float d2 = dx * dx + dy * dy + dz * dz;
- if (dist2 > d2) {
- dist2 = d2;
+ if (dist_squared > d2) {
+ dist_squared = d2;
index = idx;
}
}
for (unsigned int bit_offset = KernelPolicy::WARP_SIZE / 2; bit_offset > 0;
bit_offset /= 2) {
- const float next = __shfl_down_sync(FULL_MASK, dist2, bit_offset);
+ const float next = __shfl_down_sync(FULL_MASK, dist_squared, bit_offset);
const int next_index = __shfl_down_sync(FULL_MASK, index, bit_offset);
- if (dist2 > next) {
- dist2 = next;
+ if (dist_squared > next) {
+ dist_squared = next;
index = next_index;
}
}
// retrieve index and distance
index = __shfl_sync(FULL_MASK, index, 0);
- const float dist = sqrt(__shfl_sync(FULL_MASK, dist2, 0));
+ dist_squared = __shfl_sync(FULL_MASK, dist_squared, 0);
- return std::make_pair(index, dist);
+ return {index, dist_squared};
}
};
} } }
-void pcl::device::OctreeImpl::nearestKSearchBatch(const Queries& queries, int /*k*/, NeighborIndices& results) const
+void pcl::device::OctreeImpl::nearestKSearchBatch(const Queries& queries, int /*k*/, NeighborIndices& results, BatchResultSqrDists& sqr_distances) const
{
using BatchType = pcl::device::knn_search::Batch;
batch.output = results.data;
batch.sizes = results.sizes;
+ batch.sqr_distances = sqr_distances;
batch.points = points_sorted;
batch.points_step = points_sorted.step()/points_sorted.elem_size;
#include "pcl/gpu/utils/safe_call.hpp"
#include "internal.hpp"
+#include "utils/approx_nearest_utils.hpp"
#include "utils/boxutils.hpp"
#include<algorithm>
#include<limits>
+#include <tuple>
using namespace pcl::gpu;
using namespace pcl::device;
namespace
{
- int getBitsNum(int integer)
- {
- int count = 0;
- while(integer > 0)
- {
- if (integer & 1)
- ++count;
- integer>>=1;
- }
- return count;
- }
-
struct OctreeIteratorHost
{
const static int MAX_LEVELS_PLUS_ROOT = 11;
void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const
{
- float3 minp = octreeGlobal.minp;
- float3 maxp = octreeGlobal.maxp;
-
- int node_idx = 0;
+ const float3& minp = octreeGlobal.minp;
+ const float3& maxp = octreeGlobal.maxp;
+ const float3 query_point = make_float3(query.x, query.y, query.z);
- bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z;
-
- if(!out_of_root)
- {
- int code = CalcMorton(minp, maxp)(query);
- int level = 0;
-
- for(;;)
- {
- int mask_pos = 1 << Morton::extractLevelCode(code, level);
-
- int node = host_octree.nodes[node_idx];
- int mask = node & 0xFF;
-
- if(getBitsNum(mask) == 0) // leaf
- break;
-
- if ( (mask & mask_pos) == 0) // no child
- break;
-
- node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1));
- ++level;
- }
- }
+ const int node_idx = pcl::device::findNode(minp, maxp, query_point, host_octree.nodes);
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
sqr_dist = d2;
out_index = i;
}
- }
+ }
out_index = host_octree.indices[out_index];
}
void radiusSearch(const Queries& queries, const Indices& indices, float radius, NeighborIndices& results);
- void approxNearestSearch(const Queries& queries, NeighborIndices& results) const;
+ void approxNearestSearch(const Queries& queries, NeighborIndices& results, BatchResultSqrDists& sqr_distance) const;
- void nearestKSearchBatch(const Queries& queries, int k, NeighborIndices& results) const;
+ void nearestKSearchBatch(const Queries& queries, int k, NeighborIndices& results, BatchResultSqrDists& sqr_distances) const;
//just reference
PointCloud points;
int bin, ptx;
OctreeImpl::get_gpu_arch_compiled_for(bin, ptx);
+ if (bin < 0 || ptx < 0)
+ {
+ pcl::gpu::error(R"(cudaFuncGetAttributes() returned a value < 0.
+This is likely a build configuration error.
+Ensure that the proper compute capability is specified in the CUDA_ARCH_BIN cmake variable when building for your GPU.)",
+ __FILE__, __LINE__);
+ }
+
if (bin < 20 && ptx < 20)
pcl::gpu::error("This must be compiled for compute capability >= 2.0", __FILE__, __LINE__);
}
void pcl::gpu::Octree::approxNearestSearch(const Queries& queries, NeighborIndices& results) const
+{
+ ResultSqrDists sqr_distance;
+ approxNearestSearch(queries, results, sqr_distance);
+}
+
+void pcl::gpu::Octree::approxNearestSearch(const Queries& queries, NeighborIndices& results, ResultSqrDists& sqr_distance) const
{
assert(queries.size() > 0);
results.create(static_cast<int> (queries.size()), 1);
+ sqr_distance.create(queries.size());
const OctreeImpl::Queries& q = (const OctreeImpl::Queries&)queries;
- static_cast<OctreeImpl*>(impl)->approxNearestSearch(q, results);
+ static_cast<OctreeImpl*>(impl)->approxNearestSearch(q, results, sqr_distance);
}
void pcl::gpu::Octree::nearestKSearchBatch(const Queries& queries, int k, NeighborIndices& results) const
+{
+ ResultSqrDists sqr_distances;
+ nearestKSearchBatch(queries, k, results, sqr_distances);
+}
+
+void pcl::gpu::Octree::nearestKSearchBatch(const Queries& queries, int k, NeighborIndices& results, ResultSqrDists& sqr_distances) const
{
if (k != 1)
throw pcl::PCLException("OctreeGPU::knnSearch is supported only for k == 1", __FILE__, "", __LINE__);
assert(queries.size() > 0);
results.create(static_cast<int> (queries.size()), k);
+ sqr_distances.create(queries.size() * k);
const OctreeImpl::Queries& q = (const OctreeImpl::Queries&)queries;
- static_cast<OctreeImpl*>(impl)->nearestKSearchBatch(q, k, results);
+ static_cast<OctreeImpl*>(impl)->nearestKSearchBatch(q, k, results, sqr_distances);
}
//////////////////////////////////////////////////////////////////////////////////////
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2020-, Open Perception
+ *
+ * All rights reserved
+ */
+
+#pragma once
+
+#include "morton.hpp"
+#include <assert.h>
+
+#include <limits>
+#include <tuple>
+#include <bitset>
+
+namespace pcl {
+namespace device {
+
+__device__ __host__ __forceinline__ unsigned
+getBitsNum(const unsigned integer)
+{
+ #ifdef __CUDA_ARCH__
+ return __popc(integer);
+ #else
+ return std::bitset<8*sizeof(integer)> (integer).count();
+ #endif
+}
+
+__host__ __device__ __forceinline__ std::pair<uint3, std::uint8_t>
+nearestVoxel(const float3 query,
+ const unsigned& level,
+ const std::uint8_t& mask,
+ const float3& minp,
+ const float3& maxp,
+ const uint3& index)
+{
+ assert(mask != 0);
+ // identify closest voxel
+ float closest_distance = std::numeric_limits<float>::max();
+ unsigned closest_index = 0;
+ uint3 closest = make_uint3(0, 0, 0);
+
+ for (unsigned i = 0; i < 8; ++i) {
+ if ((mask & (1 << i)) == 0) // no child
+ continue;
+
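+    // Bit 0 of the child slot index selects the x half of the voxel, bit 1 the y half,
+    // and bit 2 the z half (matching the child mask layout of the octree nodes).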
+ const uint3 child = make_uint3((index.x << 1) + (i & 1),
+ (index.y << 1) + ((i >> 1) & 1),
+ (index.z << 1) + ((i >> 2) & 1));
+
+ // find center of child cell
+ const unsigned voxel_width_scale_factor = 1 << (level + 2);
+ const float3 voxel_center =
+ make_float3(minp.x + (maxp.x - minp.x) * (2 * child.x + 1) / voxel_width_scale_factor,
+ minp.y + (maxp.y - minp.y) * (2 * child.y + 1) / voxel_width_scale_factor,
+ minp.z + (maxp.z - minp.z) * (2 * child.z + 1) / voxel_width_scale_factor);
+
+ // compute distance to centroid
+ const float3 dist = make_float3(
+ voxel_center.x - query.x, voxel_center.y - query.y, voxel_center.z - query.z);
+
+ const float distance_to_query = dist.x * dist.x + dist.y * dist.y + dist.z * dist.z;
+
+ // compare distance
+ if (distance_to_query < closest_distance) {
+ closest_distance = distance_to_query;
+ closest_index = i;
+ closest = child;
+ }
+ }
+
+ return {closest, 1 << closest_index};
+}
+
+#pragma hd_warning_disable
+template <typename T>
+__device__ __host__ int
+findNode(const float3 minp, const float3 maxp, const float3 query, const T nodes)
+{
+ size_t node_idx = 0;
+ const auto code = CalcMorton(minp, maxp)(query);
+ unsigned level = 0;
+
+ bool voxel_traversal = false;
+ uint3 index = Morton::decomposeCode(code);
+ std::uint8_t mask_pos;
+
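+  // Descend along the query's Morton code while the matching child exists; as soon as a
+  // missing child is hit, switch to nearest-centroid (nearestVoxel) traversal for the
+  // remaining levels.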
+ while (true) {
+ const auto node = nodes[node_idx];
+ const std::uint8_t mask = node & 0xFF;
+
+ if (!mask) // leaf
+ return node_idx;
+
+ if (voxel_traversal) // empty voxel already encountered, performing nearest-centroid
+ // based traversal
+ {
+ const auto nearest_voxel = nearestVoxel(query, level, mask, minp, maxp, index);
+ index = nearest_voxel.first;
+ mask_pos = nearest_voxel.second;
+ }
+ else {
+ mask_pos = 1 << Morton::extractLevelCode(code, level);
+
+ if (!(mask & mask_pos)) // child doesn't exist
+ {
+ const auto remaining_depth = Morton::levels - level;
+ index.x >>= remaining_depth;
+ index.y >>= remaining_depth;
+ index.z >>= remaining_depth;
+
+ voxel_traversal = true;
+ const auto nearest_voxel = nearestVoxel(query, level, mask, minp, maxp, index);
+ index = nearest_voxel.first;
+ mask_pos = nearest_voxel.second;
+ }
+ }
+ node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1));
+ ++level;
+ }
+}
+} // namespace device
+} // namespace pcl
cell_z = compactBits(code, 2);
}
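+    // De-interleaves a Morton code back into its (x, y, z) cell indices; with x at bit
+    // offset 0, e.g. code 0b110101 decomposes to x = 1, y = 2, z = 3.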
+ __device__ __host__ __forceinline__
+ static uint3 decomposeCode(code_t code)
+ {
+ return make_uint3 (compactBits(code, 0), compactBits(code, 1), compactBits(code, 2));
+ }
+
__host__ __device__ __forceinline__
static code_t extractLevelCode(code_t code, int level)
{
}
__device__ __host__ __forceinline__ Morton::code_t operator()(const float3& p) const
- {
- int cellx = min((int)std::floor(depth_mult * (p.x - minp_.x)/dims_.x), depth_mult - 1);
- int celly = min((int)std::floor(depth_mult * (p.y - minp_.y)/dims_.y), depth_mult - 1);
- int cellz = min((int)std::floor(depth_mult * (p.z - minp_.z)/dims_.z), depth_mult - 1);
+ {
+ //Using floorf due to changes to MSVC 16.9. See details here: https://devtalk.blender.org/t/cuda-compile-error-windows-10/17886/4
+ //floorf is without std:: see why here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79700
+ const int cellx = min((int)floorf(depth_mult * min(1.f, max(0.f, (p.x - minp_.x)/dims_.x))), depth_mult - 1);
+ const int celly = min((int)floorf(depth_mult * min(1.f, max(0.f, (p.y - minp_.y)/dims_.y))), depth_mult - 1);
+ const int cellz = min((int)floorf(depth_mult * min(1.f, max(0.f, (p.z - minp_.z)/dims_.z))), depth_mult - 1);
return Morton::createCode(cellx, celly, cellz);
}
}
}
-#endif /* PCL_GPU_OCTREE_MORTON_HPP */
\ No newline at end of file
+#endif /* PCL_GPU_OCTREE_MORTON_HPP */
const Blob2& blob = sorted[part_label][part_lid];
// iterate over the number of pixels that are part of this label
- const std::vector<int>& indices = blob.indices.indices;
+ const auto& indices = blob.indices.indices;
tree.indices.indices.insert(tree.indices.indices.end(), indices.begin(), indices.end());
if(nr_children == 0)
const Blob2& blob = sorted[part_label][part_lid];
// iterate over the number of pixels that are part of this label
- const std::vector<int>& indices = blob.indices.indices;
+ const auto& indices = blob.indices.indices;
tree.indices.indices.insert(tree.indices.indices.end(), indices.begin(), indices.end());
if(nr_children == 0)
allocate_buffers (int rows = 480, int cols = 640);
void
- shs5 (const pcl::PointCloud<PointT> &cloud, const std::vector<int>& indices, unsigned char *mask);
+ shs5 (const pcl::PointCloud<PointT> &cloud, const pcl::Indices& indices, unsigned char *mask);
//!!! only for debug purposes TODO: remove this.
friend class PeoplePCDApp;
cloud_out.width = input_gray.width;
cloud_out.height = input_gray.height;
- cloud_out.points.resize (input_gray.size ());
+ cloud_out.resize (input_gray.size ());
PCL_ASSERT_ERROR_PRINT_RETURN(!gpu_allocator.isCounting(),"retcode=NCV_NULL_PTR", NCV_NULL_PTR);
// Fill in the probabilities
for(const auto &inlier_index : inlier_indices) // iterate over all found planes
{
- for(const int &index : inlier_index.indices) // iterate over all the indices in that plane
+ for(const auto &index : inlier_index.indices) // iterate over all the indices in that plane
{
P_l_host_[index].probs[pcl::gpu::people::Background] = 1.f; // set background at max
}
PCL_DEBUG("[pcl::gpu::people::OrganizedPlaneDetector::allocate_buffers] : (D) : Called\n");
// Create histogram on host
- P_l_host_.points.resize(rows*cols);
+ P_l_host_.resize(rows*cols);
P_l_host_.width = cols;
P_l_host_.height = rows;
- P_l_host_prev_.points.resize(rows*cols);
+ P_l_host_prev_.resize(rows*cols);
P_l_host_prev_.width = cols;
P_l_host_prev_.height = rows;
cloud_host_.width = cols;
cloud_host_.height = rows;
- cloud_host_.points.resize(cols * rows);
+ cloud_host_.resize(cols * rows);
cloud_host_.is_dense = false;
cloud_host_color_.width = cols;
hue_host_.width = cols;
hue_host_.height = rows;
- hue_host_.points.resize(cols * rows);
+ hue_host_.resize(cols * rows);
hue_host_.is_dense = false;
depth_host_.width = cols;
depth_host_.height = rows;
- depth_host_.points.resize(cols * rows);
+ depth_host_.resize(cols * rows);
depth_host_.is_dense = false;
flowermat_host_.width = cols;
flowermat_host_.height = rows;
- flowermat_host_.points.resize(cols * rows);
+ flowermat_host_.resize(cols * rows);
flowermat_host_.is_dense = false;
cloud_device_.create(rows, cols);
Tree2 t;
buildTree(sorted, cloud_host_, Neck, c, t);
- const std::vector<int>& seed = t.indices.indices;
+ const auto& seed = t.indices.indices;
- std::fill(flowermat_host_.points.begin(), flowermat_host_.points.end(), 0);
+ std::fill(flowermat_host_.begin(), flowermat_host_.end(), 0);
{
//ScopeTime time("shs");
shs5(cloud_host_, seed, &flowermat_host_[0]);
Tree2 t;
buildTree(sorted, cloud_host_, Neck, c, t, person_attribs_);
- const std::vector<int>& seed = t.indices.indices;
+ const auto& seed = t.indices.indices;
- std::fill(flowermat_host_.points.begin(), flowermat_host_.points.end(), 0);
+ std::fill(flowermat_host_.begin(), flowermat_host_.end(), 0);
{
//ScopeTime time("shs");
shs5(cloud_host_, seed, &flowermat_host_[0]);
}
void
-pcl::gpu::people::PeopleDetector::shs5(const pcl::PointCloud<PointT> &cloud, const std::vector<int>& indices, unsigned char *mask)
+pcl::gpu::people::PeopleDetector::shs5(const pcl::PointCloud<PointT> &cloud, const pcl::Indices& indices, unsigned char *mask)
{
pcl::device::Intr intr(fx_, fy_, cx_, cy_);
intr.setDefaultPPIfIncorrect(cloud.width, cloud.height);
else()
set(DEFAULT TRUE)
set(REASON)
- set(VTK_USE_FILE "${VTK_USE_FILE}" CACHE INTERNAL "VTK_USE_FILE")
- include("${VTK_USE_FILE}")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
endif()
depth_view_.setPosition (650, 0);
cmap_device_.create(ROWS, COLS);
- cmap_host_.points.resize(COLS * ROWS);
+ cmap_host_.resize(COLS * ROWS);
depth_device_.create(ROWS, COLS);
image_device_.create(ROWS, COLS);
- depth_host_.points.resize(COLS * ROWS);
+ depth_host_.resize(COLS * ROWS);
- rgba_host_.points.resize(COLS * ROWS);
+ rgba_host_.resize(COLS * ROWS);
rgb_host_.resize(COLS * ROWS * 3);
pcl::gpu::people::uploadColorMap(color_map_);
int c;
cmap_host_.width = cmap_device_.cols();
cmap_host_.height = cmap_device_.rows();
- cmap_host_.points.resize(cmap_host_.width * cmap_host_.height);
+ cmap_host_.resize(cmap_host_.width * cmap_host_.height);
cmap_device_.download(cmap_host_.points, c);
final_view_.showRGBImage<pcl::RGB>(cmap_host_);
{
depth_host_.width = people_detector_.depth_device1_.cols();
depth_host_.height = people_detector_.depth_device1_.rows();
- depth_host_.points.resize(depth_host_.width * depth_host_.height);
+ depth_host_.resize(depth_host_.width * depth_host_.height);
people_detector_.depth_device1_.download(depth_host_.points, c);
}
const unsigned short *data = depth_wrapper->getDepthMetaData().Data();
depth_device_.upload(data, s, h, w);
- depth_host_.points.resize(w *h);
+ depth_host_.resize(w * h);
depth_host_.width = w;
depth_host_.height = h;
std::copy(data, data + w * h, &depth_host_[0]);
image_wrapper->fillRGB(w, h, (unsigned char*)&rgb_host_[0]);
// convert to rgba, TODO image_wrapper should be updated to support rgba directly
- rgba_host_.points.resize(w * h);
+ rgba_host_.resize(w * h);
rgba_host_.width = w;
rgba_host_.height = h;
for(std::size_t i = 0; i < rgba_host_.size(); ++i)
tree_files.resize(num_trees);
if (num_trees == 0 || num_trees > 4)
{
- PCL_ERROR("[Main] : Invalid number of trees");
+ PCL_ERROR("[Main] : Invalid number of trees\n");
print_help();
return -1;
}
#include <pcl/gpu/people/people_detector.h>
#include <pcl/gpu/people/colormap.h>
#include <pcl/visualization/image_viewer.h>
-#include <pcl/search/pcl_search.h>
#include <Eigen/Core>
#include <pcl/io/png_io.h>
char val = static_cast<char> (value8);
pcl::RGB p;
p.r = val; p.b = val; p.g = val;
- rgb.points.push_back(p);
+ rgb.push_back(p);
}
rgb.width = histograms.width;
rgb.height = histograms.height;
set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS})
+PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
mark_as_advanced("BUILD_${SUBSYS_NAME}")
PCL_ADD_DOC("${SUBSYS_NAME}")
set(LIB_NAME "pcl_${SUBSYS_NAME}")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
-target_link_libraries("${LIB_NAME}" pcl_gpu_octree pcl_gpu_utils pcl_gpu_containers)
+target_link_libraries("${LIB_NAME}" pcl_common pcl_gpu_octree pcl_gpu_utils pcl_gpu_containers)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
# Install include files
{
namespace gpu
{
- void
- extractEuclideanClusters (const pcl::PointCloud<pcl::PointXYZ>::Ptr &host_cloud_,
+ template <typename PointT> void
+ extractEuclideanClusters (const typename pcl::PointCloud<PointT>::Ptr &host_cloud_,
const pcl::gpu::Octree::Ptr &tree,
float tolerance,
std::vector<PointIndices> &clusters,
* \author Koen Buys, Radu Bogdan Rusu
* \ingroup segmentation
*/
+ template <typename PointT>
class EuclideanClusterExtraction
{
public:
- using PointType = pcl::PointXYZ;
- using PointCloudHost = pcl::PointCloud<pcl::PointXYZ>;
- using PointCloudHostPtr = PointCloudHost::Ptr;
- using PointCloudHostConstPtr = PointCloudHost::ConstPtr;
+ using PointCloudHost = pcl::PointCloud<PointT>;
+ using PointCloudHostPtr = typename PointCloudHost::Ptr;
+ using PointCloudHostConstPtr = typename PointCloudHost::ConstPtr;
using PointIndicesPtr = PointIndices::Ptr;
using PointIndicesConstPtr = PointIndices::ConstPtr;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Empty constructor. */
- EuclideanClusterExtraction () : min_pts_per_cluster_ (1), max_pts_per_cluster_ (std::numeric_limits<int>::max ())
- {};
+ EuclideanClusterExtraction () = default;
/** \brief the destructor */
/* ~EuclideanClusterExtraction ()
GPUTreePtr tree_;
/** \brief The spatial cluster tolerance as a measure in the L2 Euclidean space. */
- double cluster_tolerance_;
+ double cluster_tolerance_ {0};
/** \brief The minimum number of points that a cluster needs to contain in order to be considered valid (default = 1). */
- int min_pts_per_cluster_;
+ int min_pts_per_cluster_ {1};
/** \brief The maximum number of points that a cluster needs to contain in order to be considered valid (default = MAXINT). */
- int max_pts_per_cluster_;
+ int max_pts_per_cluster_ {std::numeric_limits<int>::max()};
/** \brief Class getName method. */
virtual std::string getClassName () const { return ("gpu::EuclideanClusterExtraction"); }
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Empty constructor. */
- EuclideanLabeledClusterExtraction () : min_pts_per_cluster_ (1), max_pts_per_cluster_ (std::numeric_limits<int>::max ())
- {};
+ EuclideanLabeledClusterExtraction () = default;
/** \brief Provide a pointer to the search object.
* \param tree a pointer to the spatial search object.
GPUTreePtr tree_;
/** \brief The spatial cluster tolerance as a measure in the L2 Euclidean space. */
- double cluster_tolerance_;
+ double cluster_tolerance_ {0};
/** \brief The minimum number of points that a cluster needs to contain in order to be considered valid (default = 1). */
- int min_pts_per_cluster_;
+ int min_pts_per_cluster_ {1};
/** \brief The maximum number of points that a cluster needs to contain in order to be considered valid (default = MAXINT). */
- int max_pts_per_cluster_;
+ int max_pts_per_cluster_ {std::numeric_limits<int>::max()};
/** \brief Class getName method. */
virtual std::string getClassName () const { return ("gpu::EuclideanLabeledClusterExtraction"); }
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Empty constructor. */
- SeededHueSegmentation () : delta_hue_ (0.0)
- {};
+ SeededHueSegmentation () = default;
/** \brief Provide a pointer to the search object.
* \param tree a pointer to the spatial search object.
GPUTreePtr tree_;
/** \brief The spatial cluster tolerance as a measure in the L2 Euclidean space. */
- double cluster_tolerance_;
+ double cluster_tolerance_ {0};
/** \brief The allowed difference on the hue*/
- float delta_hue_;
+ float delta_hue_ {0.0};
/** \brief Class getName method. */
virtual std::string getClassName () const { return ("gpu::SeededHueSegmentation"); }
* @author: Koen Buys
*/
-#ifndef PCL_GPU_SEGMENTATION_IMPL_EXTRACT_CLUSTERS_H_
-#define PCL_GPU_SEGMENTATION_IMPL_EXTRACT_CLUSTERS_H_
-
+#pragma once
+#include <pcl/common/copy_point.h>
#include <pcl/gpu/segmentation/gpu_extract_clusters.h>
+namespace pcl {
+namespace detail {
+
+/// Downloads only the necessary cluster indices from the device to the host.
void
-pcl::gpu::extractEuclideanClusters (const pcl::PointCloud<pcl::PointXYZ>::Ptr &host_cloud_,
+economical_download(const pcl::gpu::NeighborIndices& source_indices,
+ const pcl::Indices& buffer_indices,
+ std::size_t buffer_size,
+ pcl::Indices& downloaded_indices);
+} // namespace detail
+} // namespace pcl
+
+template <typename PointT> void
+pcl::gpu::extractEuclideanClusters (const typename pcl::PointCloud<PointT>::Ptr &host_cloud_,
const pcl::gpu::Octree::Ptr &tree,
float tolerance,
std::vector<PointIndices> &clusters,
// Create a bool vector of processed point indices, and initialize it to false
// cloud is a DeviceArray<PointType>
+ PCL_DEBUG("[pcl::gpu::extractEuclideanClusters]\n");
std::vector<bool> processed (host_cloud_->size (), false);
int max_answers;
max_answers = host_cloud_->size();
else
max_answers = max_pts_per_cluster;
- std::cout << "Max_answers: " << max_answers << std::endl;
+ PCL_DEBUG("Max_answers: %i\n", max_answers);
// to store the current cluster
pcl::PointIndices r;
DeviceArray<PointXYZ> queries_device_buffer;
queries_device_buffer.create(max_answers);
+ // Host buffer for results
+ pcl::Indices sizes, data, cpu_tmp;
// Process all points in the cloud
for (std::size_t i = 0; i < host_cloud_->size (); ++i)
{
// Create the query queue on the device, point based not indices
pcl::gpu::Octree::Queries queries_device;
// Create the query queue on the host
- pcl::PointCloud<pcl::PointXYZ>::VectorType queries_host;
+ pcl::PointCloud<pcl::PointXYZ>::VectorType queries_host;
+
+ // Copy the point into a temporary PointXYZ, since the GPU octree queries use PointXYZ
+ PointXYZ p;
+ pcl::copyPoint((*host_cloud_)[i], p);
+
// Push the starting point in the vector
- queries_host.push_back ((*host_cloud_)[i]);
+ queries_host.push_back (p);
// Clear vector
r.indices.clear();
// Push the starting point in
// once the area stop growing, stop also iterating.
do
{
- // Host buffer for results
- std::vector<int> sizes, data;
-
+ data.clear();
// if the number of queries is not high enough implement search on Host here
if(queries_host.size () <= 10) ///@todo: adjust this to a variable number settable with method
{
- std::cout << " CPU: ";
+ PCL_DEBUG(" CPU: ");
for(std::size_t p = 0; p < queries_host.size (); p++)
{
// Execute the radiusSearch on the host
- tree->radiusSearchHost(queries_host[p], tolerance, data, max_answers);
- }
- // Store the previously found number of points
- previous_found_points = found_points;
- // Clear queries list
- queries_host.clear();
-
- //std::unique(data.begin(), data.end());
- if(data.size () == 1)
- continue;
-
- // Process the results
- for(std::size_t i = 0; i < data.size (); i++)
- {
- if(processed[data[i]])
- continue;
- processed[data[i]] = true;
- queries_host.push_back ((*host_cloud_)[data[i]]);
- found_points++;
- r.indices.push_back(data[i]);
+ cpu_tmp.clear();
+ tree->radiusSearchHost(queries_host[p], tolerance, cpu_tmp, max_answers);
+ std::copy(cpu_tmp.begin(), cpu_tmp.end(), std::back_inserter(data));
}
}
-
// If number of queries is high enough do it here
else
{
- std::cout << " GPU: ";
+ PCL_DEBUG(" GPU: ");
+ sizes.clear();
// Copy buffer
queries_device = DeviceArray<PointXYZ>(queries_device_buffer.ptr(),queries_host.size());
// Move queries to GPU
// Execute search
tree->radiusSearch(queries_device, tolerance, max_answers, result_device);
// Copy results from GPU to Host
- result_device.sizes.download (sizes);
- result_device.data.download (data);
- // Store the previously found number of points
- previous_found_points = found_points;
- // Clear queries list
- queries_host.clear();
- for(std::size_t qp = 0; qp < sizes.size (); qp++)
- {
- for(int qp_r = 0; qp_r < sizes[qp]; qp_r++)
- {
- if(processed[data[qp_r + qp * max_answers]])
- continue;
- processed[data[qp_r + qp * max_answers]] = true;
- queries_host.push_back ((*host_cloud_)[data[qp_r + qp * max_answers]]);
- found_points++;
- r.indices.push_back(data[qp_r + qp * max_answers]);
- }
- }
+ result_device.sizes.download(sizes);
+ pcl::detail::economical_download(result_device, sizes, max_answers, data);
+ }
+ // Store the previously found number of points
+ previous_found_points = found_points;
+ // Clear queries list
+ queries_host.clear();
+
+ if(data.size () == 1)
+ continue;
+
+ // Process the results
+ for(auto idx : data)
+ {
+ if(processed[idx])
+ continue;
+ processed[idx] = true;
+ PointXYZ p;
+ pcl::copyPoint((*host_cloud_)[idx], p);
+ queries_host.push_back (p);
+ found_points++;
+ r.indices.push_back(idx);
}
- std::cout << " data.size: " << data.size() << " foundpoints: " << found_points << " previous: " << previous_found_points;
- std::cout << " new points: " << found_points - previous_found_points << " next queries size: " << queries_host.size() << std::endl;
+ PCL_DEBUG(" data.size: %i, foundpoints: %i, previous: %i", data.size() ,
+ found_points, previous_found_points);
+ PCL_DEBUG(" new points: %i, next queries size: %i\n", found_points - previous_found_points,
+ queries_host.size());
}
while (previous_found_points < found_points);
// If this queue is satisfactory, add to the clusters
}
}
-void
-pcl::gpu::EuclideanClusterExtraction::extract (std::vector<pcl::PointIndices> &clusters)
+template <typename PointT> void
+pcl::gpu::EuclideanClusterExtraction<PointT>::extract (std::vector<pcl::PointIndices> &clusters)
{
/*
// Initialize the GPU search tree
}
*/
// Extract the actual clusters
- extractEuclideanClusters (host_cloud_, tree_, cluster_tolerance_, clusters, min_pts_per_cluster_, max_pts_per_cluster_);
- std::cout << "INFO: end of extractEuclideanClusters " << std::endl;
+ extractEuclideanClusters<PointT> (host_cloud_, tree_, cluster_tolerance_, clusters, min_pts_per_cluster_, max_pts_per_cluster_);
+ PCL_DEBUG("INFO: end of extractEuclideanClusters\n");
// Sort the clusters based on their size (largest one first)
//std::sort (clusters.rbegin (), clusters.rend (), comparePointClusters);
}
-#endif //PCL_GPU_SEGMENTATION_IMPL_EXTRACT_CLUSTERS_H_
+#define PCL_INSTANTIATE_extractEuclideanClusters(T) template void PCL_EXPORTS pcl::gpu::extractEuclideanClusters<T> (const typename pcl::PointCloud<T>::Ptr &, const pcl::gpu::Octree::Ptr &,float, std::vector<PointIndices> &, unsigned int, unsigned int);
+#define PCL_INSTANTIATE_EuclideanClusterExtraction(T) template class PCL_EXPORTS pcl::gpu::EuclideanClusterExtraction<T>;
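For context, a usage sketch of the now-templated free function with a PointXYZ cloud. This is illustrative only and not part of the changeset: cluster_on_gpu is a hypothetical wrapper, and the pcl::gpu::Octree::setCloud/build and DeviceArray::upload calls are assumed to behave as in pcl_gpu_octree and pcl_gpu_containers.

// Hedged usage sketch (not part of this diff).
#include <pcl/gpu/containers/device_array.h>
#include <pcl/gpu/octree/octree.hpp>
#include <pcl/gpu/segmentation/gpu_extract_clusters.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <vector>

void
cluster_on_gpu (const pcl::PointCloud<pcl::PointXYZ>::Ptr &host_cloud,
                std::vector<pcl::PointIndices> &clusters)
{
  // Upload the host cloud and build the GPU octree used for the radius searches.
  pcl::gpu::Octree::PointCloud cloud_device;
  cloud_device.upload (host_cloud->points.data (), host_cloud->size ());

  pcl::gpu::Octree::Ptr octree_device (new pcl::gpu::Octree);
  octree_device->setCloud (cloud_device);
  octree_device->build ();

  // 2 cm tolerance, clusters between 100 and 25000 points.
  pcl::gpu::extractEuclideanClusters<pcl::PointXYZ> (
      host_cloud, octree_device, 0.02f, clusters, 100, 25000);
}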
*
*/
-#ifndef PCL_GPU_SEGMENTATION_IMPL_EXTRACT_LABELED_CLUSTERS_H_
-#define PCL_GPU_SEGMENTATION_IMPL_EXTRACT_LABELED_CLUSTERS_H_
+#pragma once
#include <pcl/gpu/segmentation/gpu_extract_labeled_clusters.h>
#define PCL_INSTANTIATE_extractLabeledEuclideanClusters(T) template void PCL_EXPORTS pcl::gpu::extractLabeledEuclideanClusters<T> (const typename pcl::PointCloud<T>::Ptr &, const pcl::gpu::Octree::Ptr &,float, std::vector<PointIndices> &, unsigned int, unsigned int);
#define PCL_INSTANTIATE_EuclideanLabeledClusterExtraction(T) template class PCL_EXPORTS pcl::gpu::EuclideanLabeledClusterExtraction<T>;
-#endif //PCL_GPU_SEGMENTATION_IMPL_EXTRACT_LABELED_CLUSTERS_H_
*
*/
+#include <pcl/gpu/segmentation/impl/gpu_extract_clusters.hpp>
+#include <pcl/gpu/segmentation/impl/gpu_extract_labeled_clusters.hpp>
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-//#include <pcl/gpu/segmentation/gpu_extract_labeled_clusters.h>
-#include <pcl/gpu/segmentation/impl/gpu_extract_labeled_clusters.hpp>
// Instantiations of specific point types
+PCL_INSTANTIATE(extractEuclideanClusters, PCL_XYZ_POINT_TYPES);
+PCL_INSTANTIATE(EuclideanClusterExtraction, PCL_XYZ_POINT_TYPES);
PCL_INSTANTIATE(extractLabeledEuclideanClusters, PCL_XYZL_POINT_TYPES);
PCL_INSTANTIATE(EuclideanLabeledClusterExtraction, PCL_XYZL_POINT_TYPES);
+
+void
+pcl::detail::economical_download(const pcl::gpu::NeighborIndices& source_indices,
+ const pcl::Indices& buffer_indices,
+ std::size_t buffer_size,
+ pcl::Indices& downloaded_indices)
+{
+ std::vector<int> tmp;
+ for (std::size_t qp = 0; qp < buffer_indices.size(); qp++) {
+ std::size_t begin = qp * buffer_size;
+ tmp.resize(buffer_indices[qp]);
+ source_indices.data.download(&tmp[0], begin, buffer_indices[qp]);
+ downloaded_indices.insert(downloaded_indices.end(), tmp.begin(), tmp.end());
+ }
+}
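The helper above exploits the layout of the GPU radius-search results: a flat buffer with a fixed stride of max_answers entries per query, of which only the first sizes[qp] entries are valid. The same prefix-gathering logic, written host-side against plain vectors (illustrative only; gather_valid_prefixes is a hypothetical name, not part of this changeset):

// Host-side illustration of the chunked download performed by economical_download().
#include <cstddef>
#include <vector>

std::vector<int>
gather_valid_prefixes (const std::vector<int> &flat_results, // stride * num_queries entries
                       const std::vector<int> &sizes,        // valid entries per query row
                       std::size_t stride)                   // == max_answers
{
  std::vector<int> out;
  for (std::size_t qp = 0; qp < sizes.size (); ++qp)
  {
    const std::size_t begin = qp * stride;
    // Copy only the valid prefix of this query's row, skipping the unused tail.
    out.insert (out.end (),
                flat_results.begin () + begin,
                flat_results.begin () + begin + sizes[qp]);
  }
  return out;
}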
#include <pcl/gpu/kinfu/pixel_rgb.h>
#include <pcl/tracking/particle_filter.h>
-#include <Eigen/Dense>
-
#include "internal.h"
namespace pcl
if(WIN32)
PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS openni openni2 ensenso davidSDK dssdk rssdk rssdk2 pcap png vtk)
else()
- PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS openni openni2 ensenso davidSDK dssdk pcap png vtk libusb-1.0)
+ PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS openni openni2 ensenso davidSDK dssdk pcap png vtk libusb)
endif()
PCL_ADD_DOC("${SUBSYS_NAME}")
)
endif()
-# RSSDK2 is restricted to Windows until it is functional under other OSs
-if(WIN32 AND WITH_RSSDK2)
+if(WITH_RSSDK2)
set(RSSDK2_GRABBER_INCLUDES
include/pcl/io/real_sense_2_grabber.h
)
)
endif()
-if(LIBUSB_1_FOUND)
+if(LIBUSB_FOUND)
set(DINAST_GRABBER_INCLUDES
include/pcl/io/dinast_grabber.h
)
)
PCL_ADD_LIBRARY(pcl_io_ply COMPONENT ${SUBSYS_NAME} SOURCES ${PLY_SOURCES} ${PLY_INCLUDES})
+if(MINGW)
+ # libws2_32 isn't added by default for MinGW
+ target_link_libraries(pcl_io_ply ws2_32)
+endif()
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/ply" ${PLY_INCLUDES})
target_include_directories(pcl_io_ply PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
src/hdl_grabber.cpp
src/vlp_grabber.cpp
src/robot_eye_grabber.cpp
- src/file_io.cpp
src/auto_io.cpp
src/io_exception.cpp
${VTK_IO_SOURCE}
set(LIB_NAME "pcl_${SUBSYS_NAME}")
add_definitions(${VTK_DEFINES})
+
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${compression_incs} ${impl_incs} ${OPENNI_INCLUDES} ${OPENNI2_INCLUDES})
+
target_include_directories(${LIB_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
-link_directories(${VTK_LINK_DIRECTORIES})
-target_link_libraries("${LIB_NAME}" pcl_common pcl_io_ply ${VTK_LIBRARIES})
+
+target_link_libraries("${LIB_NAME}" pcl_common pcl_io_ply)
+if(VTK_FOUND)
+ if(${VTK_VERSION} VERSION_LESS 9.0)
+ link_directories(${VTK_LINK_DIRECTORIES})
+ target_link_libraries("${LIB_NAME}" ${VTK_LIBRARIES})
+ else()
+ target_link_libraries("${LIB_NAME}"
+ VTK::IOImage
+ VTK::IOGeometry
+ VTK::IOPLY)
+ endif()
+endif()
+
if(PNG_FOUND)
target_link_libraries("${LIB_NAME}" ${PNG_LIBRARIES})
endif()
-if(LIBUSB_1_FOUND)
- target_link_libraries("${LIB_NAME}" ${LIBUSB_1_LIBRARIES})
+if(LIBUSB_FOUND)
+ target_link_libraries("${LIB_NAME}" libusb::libusb)
endif()
if(WITH_OPENNI2)
if(WITH_OPENNI)
target_link_libraries("${LIB_NAME}" ${OPENNI_LIBRARIES})
+ if(VTK_FOUND AND (NOT ${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries("${LIB_NAME}" VTK::FiltersCore VTK::FiltersGeneral)
+ endif()
endif()
if(WITH_FZAPI)
target_link_libraries(${LIB_NAME} ${RSSDK_LIBRARIES})
endif()
-# RSSDK2 is restricted to Windows until it is functional under other OSs
-if(WIN32 AND WITH_RSSDK2)
+if(WITH_RSSDK2)
target_link_libraries(${LIB_NAME} ${RSSDK2_LIBRARIES})
endif()
#pragma once
-#include <cstdio>
-#include <cstring>
#include <iostream>
-#include <iterator>
#include <vector>
namespace pcl
* \param inputCloud_arg input point cloud
* */
void
- encodeAverageOfPoints (const typename std::vector<int>& indexVector_arg, unsigned char rgba_offset_arg, PointCloudConstPtr inputCloud_arg)
+ encodeAverageOfPoints (const Indices& indexVector_arg, unsigned char rgba_offset_arg, PointCloudConstPtr inputCloud_arg)
{
- unsigned int avgRed = 0;
- unsigned int avgGreen = 0;
- unsigned int avgBlue = 0;
+ uindex_t avgRed = 0;
+ uindex_t avgGreen = 0;
+ uindex_t avgBlue = 0;
// iterate over points
- std::size_t len = indexVector_arg.size ();
- for (std::size_t i = 0; i < len; i++)
+ for (const auto& idx: indexVector_arg)
{
// get color information from points
- const int& idx = indexVector_arg[i];
const char* idxPointPtr = reinterpret_cast<const char*> (&(*inputCloud_arg)[idx]);
const int& colorInt = *reinterpret_cast<const int*> (idxPointPtr+rgba_offset_arg);
}
+ const uindex_t len = static_cast<uindex_t> (indexVector_arg.size());
// calculated average color information
if (len > 1)
{
- avgRed /= static_cast<unsigned int> (len);
- avgGreen /= static_cast<unsigned int> (len);
- avgBlue /= static_cast<unsigned int> (len);
+ avgRed /= len;
+ avgGreen /= len;
+ avgBlue /= len;
}
// remove least significant bits
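The "remove least significant bits" step pairs with the left shift applied in decodePoints further down: each averaged channel is stored with its low colorBitReduction_ bits dropped and is restored by shifting back, so the round trip quantises the channel to a multiple of 2^k. A standalone illustration (k = 3 is a hypothetical setting, not taken from this changeset):

// Lossy round trip of the colour bit reduction, sketched with plain integers.
#include <cstdint>
#include <iostream>

int
main ()
{
  const unsigned k = 3;                        // hypothetical colorBitReduction_ value
  const std::uint8_t avg = 201;                // averaged channel value from the encoder
  const std::uint8_t stored = avg >> k;        // what gets written to the stream (25)
  const std::uint8_t restored = stored << k;   // what the decoder reconstructs (200)
  std::cout << int (avg) << " -> " << int (restored) << '\n';
}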
* \param inputCloud_arg input point cloud
* */
void
- encodePoints (const typename std::vector<int>& indexVector_arg, unsigned char rgba_offset_arg, PointCloudConstPtr inputCloud_arg)
+ encodePoints (const Indices& indexVector_arg, unsigned char rgba_offset_arg, PointCloudConstPtr inputCloud_arg)
{
- unsigned int avgRed;
- unsigned int avgGreen;
- unsigned int avgBlue;
+ uindex_t avgRed;
+ uindex_t avgGreen;
+ uindex_t avgBlue;
// initialize
avgRed = avgGreen = avgBlue = 0;
// iterate over points
- std::size_t len = indexVector_arg.size ();
- for (std::size_t i = 0; i < len; i++)
+ for (const auto& idx: indexVector_arg)
{
// get color information from point
- const int& idx = indexVector_arg[i];
const char* idxPointPtr = reinterpret_cast<const char*> (&(*inputCloud_arg)[idx]);
const int& colorInt = *reinterpret_cast<const int*> (idxPointPtr+rgba_offset_arg);
}
+ const uindex_t len = static_cast<uindex_t> (indexVector_arg.size());
if (len > 1)
{
unsigned char diffRed;
unsigned char diffBlue;
// calculated average color information
- avgRed /= static_cast<unsigned int> (len);
- avgGreen /= static_cast<unsigned int> (len);
- avgBlue /= static_cast<unsigned int> (len);
+ avgRed /= len;
+ avgGreen /= len;
+ avgBlue /= len;
// iterate over points for differential encoding
- for (std::size_t i = 0; i < len; i++)
+ for (const auto& idx: indexVector_arg)
{
- const int& idx = indexVector_arg[i];
const char* idxPointPtr = reinterpret_cast<const char*> (&(*inputCloud_arg)[idx]);
const int& colorInt = *reinterpret_cast<const int*> (idxPointPtr+rgba_offset_arg);
* \param rgba_offset_arg offset to color information
*/
void
- decodePoints (PointCloudPtr outputCloud_arg, std::size_t beginIdx_arg, std::size_t endIdx_arg, unsigned char rgba_offset_arg)
+ decodePoints (PointCloudPtr outputCloud_arg, uindex_t beginIdx_arg, uindex_t endIdx_arg, unsigned char rgba_offset_arg)
{
assert (beginIdx_arg <= endIdx_arg);
// amount of points to be decoded
- unsigned int pointCount = static_cast<unsigned int> (endIdx_arg - beginIdx_arg);
+ const index_t pointCount = endIdx_arg - beginIdx_arg;
// get averaged color information for current voxel
unsigned char avgRed = *(pointAvgColorDataVector_Iterator_++);
avgBlue = static_cast<unsigned char> (avgBlue << colorBitReduction_);
// iterate over points
- for (std::size_t i = 0; i < pointCount; i++)
+ for (index_t i = 0; i < pointCount; i++)
{
unsigned int colorInt;
if (pointCount > 1)
#pragma once
-#include <map>
#include <iostream>
#include <vector>
-#include <string>
#include <cmath>
-#include <algorithm>
-#include <cstdio>
#include <cstdint>
#include <pcl/pcl_macros.h>
protected:
using DWord = std::uint32_t; // 4 bytes
- /** \brief Helper function to calculate the binary logarithm
- * \param n_arg: some value
- * \return binary logarithm (log2) of argument n_arg
- */
- PCL_DEPRECATED(1, 12, "use std::log2 instead")
- inline double
- Log2 (double n_arg)
- {
- return std::log2 (n_arg);
- }
-
private:
/** \brief Vector containing cumulative symbol frequency table. */
std::vector<std::uint64_t> cFreqTable_;
#define __PCL_IO_RANGECODING__HPP
#include <pcl/compression/entropy_range_coder.h>
-#include <map>
#include <iostream>
#include <vector>
#include <cstring>
#include <algorithm>
-#include <cstdio>
//////////////////////////////////////////////////////////////////////////////////////////////
unsigned long
#ifndef OCTREE_COMPRESSION_HPP
#define OCTREE_COMPRESSION_HPP
+#include <pcl/common/io.h> // for getFieldIndex
#include <pcl/compression/entropy_range_coder.h>
-#include <iterator>
#include <iostream>
#include <vector>
#include <cstring>
-#include <iostream>
-#include <cstdio>
namespace pcl
{
LeafT &leaf_arg, const OctreeKey & key_arg)
{
// reference to point indices vector stored within octree leaf
- const std::vector<int>& leafIdx = leaf_arg.getPointIndicesVector();
+ const auto& leafIdx = leaf_arg.getPointIndicesVector();
if (!do_voxel_grid_enDecoding_)
{
for (std::size_t i = 0; i < pointCount; i++)
output_->points.push_back (newPoint);
- // calculcate position of lower voxel corner
+ // calculate position of lower voxel corner
double lowerVoxelCorner[3];
lowerVoxelCorner[0] = static_cast<double> (key_arg.x) * this->resolution_ + this->min_x_;
lowerVoxelCorner[1] = static_cast<double> (key_arg.y) * this->resolution_ + this->min_y_;
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <pcl/common/boost.h>
-#include <pcl/common/eigen.h>
-#include <pcl/common/common.h>
-#include <pcl/common/io.h>
-
#include <pcl/compression/libpng_wrapper.h>
#include <pcl/compression/organized_pointcloud_conversion.h>
-#include <string>
#include <vector>
-#include <limits>
#include <cassert>
namespace pcl
#pragma once
-#include <pcl/common/common.h>
-#include <pcl/common/io.h>
#include <pcl/octree/octree2buf_base.h>
#include <pcl/octree/octree_pointcloud.h>
#include "entropy_range_coder.h"
#include "compression_profiles.h"
-#include <cstdio>
-#include <cstring>
#include <iostream>
-#include <iterator>
#include <vector>
using namespace pcl::octree;
* \param[in] pointIdx_arg the index representing the point in the dataset given by \a setInputCloud to be added
*/
void
- addPointIdx (const int pointIdx_arg) override
+ addPointIdx (const uindex_t pointIdx_arg) override
{
++object_count_;
OctreePointCloud<PointT, LeafT, BranchT, OctreeT>::addPointIdx(pointIdx_arg);
/** \brief Vector for storing binary tree structure */
std::vector<char> binary_tree_data_vector_;
- /** \brief Iterator on binary tree structure vector */
- std::vector<char> binary_color_tree_vector_;
-
/** \brief Vector for storing points per voxel information */
std::vector<unsigned int> point_count_data_vector_;
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <pcl/common/boost.h>
-#include <pcl/common/eigen.h>
-#include <pcl/common/common.h>
-#include <pcl/common/io.h>
-
#include <pcl/io/openni_camera/openni_shift_to_depth_conversion.h>
#include <vector>
assert(disparityData_arg.size()==cloud_size);
// Reset point cloud
- cloud_arg.points.clear ();
- cloud_arg.points.reserve (cloud_size);
+ cloud_arg.clear ();
+ cloud_arg.reserve (cloud_size);
// Define point cloud parameters
cloud_arg.width = static_cast<std::uint32_t> (width_arg);
newPoint.x = newPoint.y = newPoint.z = bad_point;
}
- cloud_arg.points.push_back (newPoint);
+ cloud_arg.push_back (newPoint);
}
}
assert(depthData_arg.size()==cloud_size);
// Reset point cloud
- cloud_arg.points.clear ();
- cloud_arg.points.reserve (cloud_size);
+ cloud_arg.clear ();
+ cloud_arg.reserve (cloud_size);
// Define point cloud parameters
cloud_arg.width = static_cast<std::uint32_t> (width_arg);
newPoint.x = newPoint.y = newPoint.z = bad_point;
}
- cloud_arg.points.push_back (newPoint);
+ cloud_arg.push_back (newPoint);
}
}
};
}
// Reset point cloud
- cloud_arg.points.clear();
- cloud_arg.points.reserve(cloud_size);
+ cloud_arg.clear();
+ cloud_arg.reserve(cloud_size);
// Define point cloud parameters
cloud_arg.width = static_cast<std::uint32_t>(width_arg);
}
// Add point to cloud
- cloud_arg.points.push_back(newPoint);
+ cloud_arg.push_back(newPoint);
// Increment point iterator
++i;
}
}
// Reset point cloud
- cloud_arg.points.clear();
- cloud_arg.points.reserve(cloud_size);
+ cloud_arg.clear();
+ cloud_arg.reserve(cloud_size);
// Define point cloud parameters
cloud_arg.width = static_cast<std::uint32_t>(width_arg);
}
// Add point to cloud
- cloud_arg.points.push_back(newPoint);
+ cloud_arg.push_back(newPoint);
// Increment point iterator
++i;
}
#pragma once
#include <algorithm>
-#include <cstdio>
-#include <cstring>
#include <iostream>
-#include <iterator>
#include <vector>
namespace pcl
* \param inputCloud_arg input point cloud
*/
void
- encodePoints (const typename std::vector<int>& indexVector_arg, const double* referencePoint_arg,
+ encodePoints (const Indices& indexVector_arg, const double* referencePoint_arg,
PointCloudConstPtr inputCloud_arg)
{
- std::size_t len = indexVector_arg.size ();
-
// iterate over points within current voxel
- for (std::size_t i = 0; i < len; i++)
+ for (const auto& idx: indexVector_arg)
{
unsigned char diffX, diffY, diffZ;
// retrieve point from cloud
- const int& idx = indexVector_arg[i];
const PointT& idxPoint = (*inputCloud_arg)[idx];
// differentially encode point coordinates and truncate overflow
* \param endIdx_arg index indicating last point to be assigned with color information
*/
void
- decodePoints (PointCloudPtr outputCloud_arg, const double* referencePoint_arg, std::size_t beginIdx_arg,
- std::size_t endIdx_arg)
+ decodePoints (PointCloudPtr outputCloud_arg, const double* referencePoint_arg, uindex_t beginIdx_arg,
+ uindex_t endIdx_arg)
{
assert (beginIdx_arg <= endIdx_arg);
- unsigned int pointCount = static_cast<unsigned int> (endIdx_arg - beginIdx_arg);
+ const uindex_t pointCount = endIdx_arg - beginIdx_arg;
// iterate over points within current voxel
- for (std::size_t i = 0; i < pointCount; i++)
+ for (uindex_t i = 0; i < pointCount; i++)
{
// retrieve differential point information
const unsigned char& diffX = static_cast<unsigned char> (*(pointDiffDataVectorIterator_++));
void
setInputFields (const std::vector<pcl::PCLPointField>& fields);
-
- /** \brief Set the ascii file point fields using a point type.
- * \param[in] p a point type
- */
- template<typename PointT>
- PCL_DEPRECATED(1, 12, "use parameterless setInputFields<PointT>() instead")
- inline void setInputFields (const PointT p)
- {
- pcl::utils::ignore(p);
- setInputFields<PointT> ();
- }
-
-
/** \brief Set the Separating characters for the ascii point fields 2.
* \param[in] chars string of separating characters
* Sets the separating characters for the point fields. The
*/
#pragma once
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#if defined __GNUC__
# pragma GCC system_header
#endif
#pragma once
-#include <cassert>
-#include <limits>
#include <mutex>
#include <vector>
-#include <cstdint>
-
namespace pcl
{
#pragma once
#include <pcl/pcl_exports.h>
-#include <cstdlib>
namespace pcl
{
#if defined __GNUC__
# pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
#include <Eigen/Core>
#include <pcl/common/time.h>
#include <pcl/common/io.h>
-#include <pcl/io/eigen.h>
#include <Eigen/Geometry>
#include <Eigen/StdVector>
-#include <pcl/io/boost.h>
#include <pcl/io/grabber.h>
#include <pcl/common/synchronizer.h>
#include <pcl/conversions.h> // for fromPCLPointCloud2, toPCLPointCloud2
#include <pcl/point_cloud.h> // for PointCloud
#include <pcl/PCLPointCloud2.h> // for PCLPointCloud2
-#include <pcl/io/boost.h>
#include <cmath>
#include <sstream>
#include <Eigen/Geometry> // for Quaternionf
+#include <boost/numeric/conversion/cast.hpp> // for numeric_cast
+#include <boost/algorithm/string/predicate.hpp> // for iequals
namespace pcl
{
template <typename Type> inline
std::enable_if_t<std::is_floating_point<Type>::value>
copyValueString (const pcl::PCLPointCloud2 &cloud,
- const unsigned int point_index,
+ const pcl::index_t point_index,
const int point_size,
const unsigned int field_idx,
const unsigned int fields_count,
template <typename Type> inline
std::enable_if_t<std::is_integral<Type>::value>
copyValueString (const pcl::PCLPointCloud2 &cloud,
- const unsigned int point_index,
+ const pcl::index_t point_index,
const int point_size,
const unsigned int field_idx,
const unsigned int fields_count,
template <> inline void
copyValueString<std::int8_t> (const pcl::PCLPointCloud2 &cloud,
- const unsigned int point_index,
+ const pcl::index_t point_index,
const int point_size,
const unsigned int field_idx,
const unsigned int fields_count,
template <> inline void
copyValueString<std::uint8_t> (const pcl::PCLPointCloud2 &cloud,
- const unsigned int point_index,
+ const pcl::index_t point_index,
const int point_size,
const unsigned int field_idx,
const unsigned int fields_count,
template <typename Type> inline
std::enable_if_t<std::is_floating_point<Type>::value, bool>
isValueFinite (const pcl::PCLPointCloud2 &cloud,
- const unsigned int point_index,
+ const pcl::index_t point_index,
const int point_size,
const unsigned int field_idx,
const unsigned int fields_count)
template <typename Type> inline
std::enable_if_t<std::is_integral<Type>::value, bool>
isValueFinite (const pcl::PCLPointCloud2& /* cloud */,
- const unsigned int /* point_index */,
+ const pcl::index_t /* point_index */,
const int /* point_size */,
const unsigned int /* field_idx */,
const unsigned int /* fields_count */)
return true;
}
- /** \brief Copy one single value of type T (uchar, char, uint, int, float, double, ...) from a string
- *
- * Uses aoti/atof to do the conversion.
- * Checks if the st is "nan" and converts it accordingly.
- *
- * \param[in] st the string containing the value to convert and copy
- * \param[out] cloud the cloud to copy it to
- * \param[in] point_index the index of the point
- * \param[in] field_idx the index of the dimension/field
- * \param[in] fields_count the current fields count
- */
- template <typename Type> inline void
- copyStringValue (const std::string &st, pcl::PCLPointCloud2 &cloud,
- unsigned int point_index, unsigned int field_idx, unsigned int fields_count)
+ namespace detail {
+ template <typename Type>
+ inline void
+ copyStringValue(const std::string& st,
+ pcl::PCLPointCloud2& cloud,
+ pcl::index_t point_index,
+ unsigned int field_idx,
+ unsigned int fields_count,
+ std::istringstream& is)
{
Type value;
- if (boost::iequals(st, "nan"))
- {
- value = std::numeric_limits<Type>::quiet_NaN ();
+ if (boost::iequals(st, "nan")) {
+ value = std::numeric_limits<Type>::quiet_NaN();
cloud.is_dense = false;
}
- else
- {
- std::istringstream is (st);
- is.imbue (std::locale::classic ());
+ else {
+ is.str(st);
if (!(is >> value))
- value = static_cast<Type> (atof (st.c_str ()));
+ value = static_cast<Type>(atof(st.c_str()));
}
- memcpy (&cloud.data[point_index * cloud.point_step +
- cloud.fields[field_idx].offset +
- fields_count * sizeof (Type)], reinterpret_cast<char*> (&value), sizeof (Type));
+ memcpy(&cloud.data[point_index * cloud.point_step + cloud.fields[field_idx].offset +
+ fields_count * sizeof(Type)],
+ reinterpret_cast<char*>(&value),
+ sizeof(Type));
}
- template <> inline void
- copyStringValue<std::int8_t> (const std::string &st, pcl::PCLPointCloud2 &cloud,
- unsigned int point_index, unsigned int field_idx, unsigned int fields_count)
+ template <>
+ inline void
+ copyStringValue<std::int8_t>(const std::string& st,
+ pcl::PCLPointCloud2& cloud,
+ pcl::index_t point_index,
+ unsigned int field_idx,
+ unsigned int fields_count,
+ std::istringstream& is)
{
std::int8_t value;
- if (boost::iequals(st, "nan"))
- {
- value = static_cast<std::int8_t> (std::numeric_limits<int>::quiet_NaN ());
- cloud.is_dense = false;
- }
- else
- {
- int val;
- std::istringstream is (st);
- is.imbue (std::locale::classic ());
- //is >> val; -- unfortunately this fails on older GCC versions and CLANG on MacOS
- if (!(is >> val))
- val = static_cast<int> (atof (st.c_str ()));
- value = static_cast<std::int8_t> (val);
+ int val;
+ is.str(st);
+ // is >> val; -- unfortunately this fails on older GCC versions and CLANG on MacOS
+ if (!(is >> val)) {
+ val = static_cast<int>(atof(st.c_str()));
}
+ value = static_cast<std::int8_t>(val);
- memcpy (&cloud.data[point_index * cloud.point_step +
- cloud.fields[field_idx].offset +
- fields_count * sizeof (std::int8_t)], reinterpret_cast<char*> (&value), sizeof (std::int8_t));
+ memcpy(&cloud.data[point_index * cloud.point_step + cloud.fields[field_idx].offset +
+ fields_count * sizeof(std::int8_t)],
+ reinterpret_cast<char*>(&value),
+ sizeof(std::int8_t));
}
- template <> inline void
- copyStringValue<std::uint8_t> (const std::string &st, pcl::PCLPointCloud2 &cloud,
- unsigned int point_index, unsigned int field_idx, unsigned int fields_count)
+ template <>
+ inline void
+ copyStringValue<std::uint8_t>(const std::string& st,
+ pcl::PCLPointCloud2& cloud,
+ pcl::index_t point_index,
+ unsigned int field_idx,
+ unsigned int fields_count,
+ std::istringstream& is)
{
std::uint8_t value;
- if (boost::iequals(st, "nan"))
- {
- value = static_cast<std::uint8_t> (std::numeric_limits<int>::quiet_NaN ());
- cloud.is_dense = false;
- }
- else
- {
- int val;
- std::istringstream is (st);
- is.imbue (std::locale::classic ());
- //is >> val; -- unfortunately this fails on older GCC versions and CLANG on MacOS
- if (!(is >> val))
- val = static_cast<int> (atof (st.c_str ()));
- value = static_cast<std::uint8_t> (val);
+ int val;
+ is.str(st);
+ // is >> val; -- unfortunately this fails on older GCC versions and CLANG on
+ // MacOS
+ if (!(is >> val)) {
+ val = static_cast<int>(atof(st.c_str()));
}
+ value = static_cast<std::uint8_t>(val);
- memcpy (&cloud.data[point_index * cloud.point_step +
- cloud.fields[field_idx].offset +
- fields_count * sizeof (std::uint8_t)], reinterpret_cast<char*> (&value), sizeof (std::uint8_t));
+ memcpy(&cloud.data[point_index * cloud.point_step + cloud.fields[field_idx].offset +
+ fields_count * sizeof(std::uint8_t)],
+ reinterpret_cast<char*>(&value),
+ sizeof(std::uint8_t));
}
+ } // namespace detail
+ /**
+ * \brief Copy one single value of type T (uchar, char, uint, int, float, double, ...) from a string
+ * \details Uses `istringstream` to do the conversion in the classic locale.
+ * Checks if \a st is "nan" and converts it accordingly.
+ *
+ * \param[in] st the string containing the value to convert and copy
+ * \param[out] cloud the cloud to copy it to
+ * \param[in] point_index the index of the point
+ * \param[in] field_idx the index of the dimension/field
+ * \param[in] fields_count the current fields count
+ */
+ template <typename Type> inline void
+ copyStringValue (const std::string &st, pcl::PCLPointCloud2 &cloud,
+ pcl::index_t point_index, unsigned int field_idx, unsigned int fields_count)
+ {
+ std::istringstream is;
+ is.imbue (std::locale::classic ());
+ detail::copyStringValue<Type> (st, cloud, point_index, field_idx, fields_count, is);
+ }
+/**
+ * \brief Copy one single value of type T (uchar, char, uint, int, float, double, ...) from a string
+ * \details Uses the provided `istringstream` to do the conversion, respecting its locale settings.
+ * Checks if \a st is "nan" and converts it accordingly.
+ *
+ * \param[in] st the string containing the value to convert and copy
+ * \param[out] cloud the cloud to copy it to
+ * \param[in] point_index the index of the point
+ * \param[in] field_idx the index of the dimension/field
+ * \param[in] fields_count the current fields count
+ * \param[in,out] is input string stream for helping to convert st into cloud
+ */
+ template <typename Type> inline void
+ copyStringValue (const std::string &st, pcl::PCLPointCloud2 &cloud,
+ pcl::index_t point_index, unsigned int field_idx, unsigned int fields_count,
+ std::istringstream& is)
+ {
+ detail::copyStringValue<Type> (st, cloud, point_index, field_idx, fields_count, is);
+ }
}
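The reason for threading an std::istringstream through detail::copyStringValue is that a caller can construct one classic-locale stream up front and reuse it for every token, instead of paying for a new stream per value. The reuse pattern in plain stdlib terms (parse_tokens is a hypothetical helper, not part of this changeset; note the clear() needed before each reuse):

// One reusable classic-locale stream for many conversions.
#include <locale>
#include <sstream>
#include <string>
#include <vector>

std::vector<float>
parse_tokens (const std::vector<std::string> &tokens)
{
  std::istringstream is;
  is.imbue (std::locale::classic ()); // '.' as decimal separator regardless of global locale
  std::vector<float> values;
  values.reserve (tokens.size ());
  for (const auto &t : tokens)
  {
    is.clear ();  // reset eof/fail flags left over from the previous extraction
    is.str (t);
    float v = 0.0f;
    is >> v;
    values.push_back (v);
  }
  return values;
}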
#include <vector>
#include <sstream>
#include <pcl/pcl_macros.h>
-#include <pcl/io/boost.h>
#include <pcl/exceptions.h>
+#include <boost/signals2.hpp> // for connection, signal, ...
namespace pcl
{
template<typename T> boost::signals2::connection
registerCallback (const std::function<T>& callback);
- /** \brief registers a callback function/method to a signal with the corresponding signature
- * \param[in] callback: the callback function/method
- * \return Connection object, that can be used to disconnect the callback method from the signal again.
- */
- template<typename T, template<typename> class FunctionT>
- PCL_DEPRECATED (1, 12, "please assign the callback to a std::function.")
- boost::signals2::connection
- registerCallback (const FunctionT<T>& callback)
- {
- return registerCallback (std::function<T> (callback));
- }
-
/** \brief indicates whether a signal with given parameter-type exists or not
* \return true if signal exists, false otherwise
*/
*/
using sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba = void (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr &, float, float);
- using sig_cb_velodyne_hdl_scan_point_cloud_xyzrgb PCL_DEPRECATED(1, 12, "use sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba instead")
- = sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba;
-
/** \brief Signal used for a single sector
* Represents 1 corrected packet from the HDL Velodyne with the returned intensity.
*/
*/
using sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba = void (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr &);
- using sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgb PCL_DEPRECATED(1, 12, "use sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba instead")
- = sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba;
-
/** \brief Constructor taking an optional path to an HDL corrections file. The Grabber will listen on the default IP/port for data packets [192.168.3.255/2368]
* \param[in] correctionsFile Path to a file which contains the correction parameters for the HDL. This parameter is mandatory for the HDL-64, optional for the HDL-32
* \param[in] pcapFile Path to a file which contains previously captured data packets. This parameter is optional
#include <pcl/point_cloud.h>
#include <pcl/PCLPointCloud2.h>
#include <pcl/conversions.h>
-#include <pcl/io/boost.h>
#include <pcl/PolygonMesh.h>
namespace pcl
#pragma once
-#include <pcl/io/boost.h>
#include <pcl/io/image_metadata_wrapper.h>
#include <pcl/memory.h>
#include <pcl/pcl_config.h>
#include <pcl/pcl_config.h>
#include <pcl/pcl_exports.h>
-#include <pcl/io/boost.h>
#include<pcl/io/image_metadata_wrapper.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/io/boost.h>
#include <pcl/io/image_metadata_wrapper.h>
#ifndef PCL_IO_AUTO_IO_IMPL_H_
#define PCL_IO_AUTO_IO_IMPL_H_
-// #include <pcl/io/file_io.h>
-// #include <pcl/io/boost.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/ply_io.h>
#include <pcl/io/ifs_io.h>
-// #include <pcl/io/vtk_io.h>
+#include <boost/filesystem.hpp> // for path
namespace pcl
{
result = pcl::io::loadIFSFile (file_name, cloud);
else
{
- PCL_ERROR ("[pcl::io::load] Don't know how to handle file with extension %s", extension.c_str ());
+ PCL_ERROR ("[pcl::io::load] Don't know how to handle file with extension %s\n", extension.c_str ());
result = -1;
}
return (result);
result = pcl::io::saveIFSFile (file_name, cloud);
else
{
- PCL_ERROR ("[pcl::io::save] Don't know how to handle file with extension %s", extension.c_str ());
+ PCL_ERROR ("[pcl::io::save] Don't know how to handle file with extension %s\n", extension.c_str ());
result = -1;
}
return (result);
#define PCL_IO_IMPL_BUFFERS_HPP
#include <iostream>
-#include <cstring>
#include <pcl/pcl_macros.h>
#ifndef PCL_IO_PCD_IO_IMPL_H_
#define PCL_IO_PCD_IO_IMPL_H_
+#include <boost/algorithm/string/trim.hpp> // for trim
#include <fstream>
#include <fcntl.h>
#include <string>
#include <cstdlib>
#include <pcl/common/io.h> // for getFields, ...
#include <pcl/console/print.h>
-#include <pcl/io/boost.h>
#include <pcl/io/low_level_io.h>
#include <pcl/io/pcd_io.h>
pcl::PCDWriter::writeBinaryCompressed (const std::string &file_name,
const pcl::PointCloud<PointT> &cloud)
{
- if (cloud.points.empty ())
+ if (cloud.empty ())
{
throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Input point cloud has no data!");
return (-1);
}
std::ofstream fs;
- fs.open (file_name.c_str ()); // Open file
+ fs.open (file_name.c_str (), std::ios::binary); // Open file
if (!fs.is_open () || fs.fail ())
{
template <typename PointT> int
pcl::PCDWriter::writeBinary (const std::string &file_name,
const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices)
+ const pcl::Indices &indices)
{
- if (cloud.points.empty () || indices.empty ())
+ if (cloud.empty () || indices.empty ())
{
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Input point cloud has no data or empty indices given!");
return (-1);
char *out = &map[0] + data_idx;
// Copy the data
- for (const int &index : indices)
+ for (const auto &index : indices)
{
int nrj = 0;
for (const auto &field : fields)
template <typename PointT> int
pcl::PCDWriter::writeASCII (const std::string &file_name,
const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const int precision)
{
- if (cloud.points.empty () || indices.empty ())
+ if (cloud.empty () || indices.empty ())
{
throw pcl::IOException ("[pcl::PCDWriter::writeASCII] Input point cloud has no data or empty indices given!");
return (-1);
}
std::ofstream fs;
- fs.open (file_name.c_str ()); // Open file
+ fs.open (file_name.c_str (), std::ios::binary); // Open file
if (!fs.is_open () || fs.fail ())
{
throw pcl::IOException ("[pcl::PCDWriter::writeASCII] Could not open file for writing!");
stream.imbue (std::locale::classic ());
// Iterate through the points
- for (const int &index : indices)
+ for (const auto &index : indices)
{
for (std::size_t d = 0; d < fields.size (); ++d)
{
#pragma once
// PCL
-#include <pcl/common/io.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
-#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/type_traits.h>
cloud.width = polydata->GetNumberOfPoints ();
cloud.height = 1; // This indicates that the point cloud is unorganized
cloud.is_dense = false;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
// Get a list of all the fields available
std::vector<pcl::PCLPointField> fields;
cloud.width = dimensions[0];
cloud.height = dimensions[1]; // This indicates that the point cloud is organized
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
// Get a list of all the fields available
std::vector<pcl::PCLPointField> fields;
*/
#pragma once
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include pcl/common/io.h directly.")
#include <pcl/common/io.h>
# endif
# include <io.h>
# include <windows.h>
-# include <BaseTsd.h>
+# ifdef _MSC_VER
+// ssize_t is already defined in MinGW and its definition conflicts with that of
+// SSIZE_T on a 32-bit target, so do this only for MSVC.
+# include <basetsd.h>
using ssize_t = SSIZE_T;
+# endif /* _MSC_VER */
#else
# include <unistd.h>
# include <sys/mman.h>
#ifdef HAVE_OPENNI
#include <pcl/point_cloud.h>
-#include <pcl/io/eigen.h>
-#include <pcl/io/boost.h>
#include <pcl/io/grabber.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_camera/openni_device_oni.h>
#include <pcl/io/openni_camera/openni_depth_image.h>
#include <pcl/io/openni_camera/openni_ir_image.h>
#include <string>
-#include <deque>
#include <pcl/common/synchronizer.h>
namespace pcl
#pragma once
-#include <cstddef>
#include <ostream>
#include <OpenNI.h>
#ifdef HAVE_OPENNI2
#include <pcl/point_cloud.h>
-#include <pcl/io/eigen.h>
-#include <pcl/io/boost.h>
#include <pcl/io/grabber.h>
#include <pcl/io/openni2/openni2_device.h>
#include <string>
-#include <deque>
#include <pcl/common/synchronizer.h>
#include <pcl/io/image.h>
#endif
#include <XnOS.h>
-//work around for qt 5 bug: https://bugreports.qt-project.org/browse/QTBUG-29331
-#ifndef Q_MOC_RUN
#include <XnCppWrapper.h>
-#endif // Q_MOC_RUN
#include <XnVersion.h>
#endif
//#include <pcl/pcl_macros.h> // <-- because current header is included in NVCC-compiled code and contains <Eigen/Core>. Consider <pcl/pcl_exports.h>
#include <pcl/pcl_exports.h>
#include "openni_exception.h"
-#include <pcl/io/boost.h>
namespace openni_wrapper
{
#include "openni_exception.h"
#include "openni.h"
-#include <pcl/io/boost.h>
#include <pcl/io/openni_camera/openni_image.h>
#include <pcl/io/openni_camera/openni_depth_image.h>
#include <pcl/io/openni_camera/openni_ir_image.h>
#include <map>
#include <mutex>
#include <thread>
-#include <utility>
#include <vector>
/// @todo Get rid of all exception-specifications, these are useless and soon to be deprecated
#include <pcl/pcl_exports.h>
#include "openni.h"
#include "openni_exception.h"
-#include <pcl/io/boost.h>
namespace openni_wrapper
{
#include <pcl/memory.h>
#include "openni.h"
#include "openni_exception.h"
-#include <pcl/io/boost.h>
namespace openni_wrapper
{
#ifdef HAVE_OPENNI
#include <pcl/point_cloud.h>
-#include <pcl/io/eigen.h>
-#include <pcl/io/boost.h>
#include <pcl/io/grabber.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_camera/openni_device_kinect.h>
#include <pcl/io/openni_camera/openni_depth_image.h>
#include <pcl/io/openni_camera/openni_ir_image.h>
#include <string>
-#include <deque>
#include <pcl/common/synchronizer.h>
+#include <boost/shared_array.hpp> // for shared_array
namespace pcl
{
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
#include <pcl/io/file_io.h>
+#include <boost/interprocess/sync/file_lock.hpp> // for file_lock
namespace pcl
{
template <typename PointT> int
writeBinary (const std::string &file_name,
const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices);
+ const pcl::Indices &indices);
/** \brief Save point cloud data to a PCD file containing n-D points, in ASCII format
* \param[in] file_name the output file name
template <typename PointT> int
writeASCII (const std::string &file_name,
const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const int precision = 8);
/** \brief Save point cloud data to a PCD file containing n-D points
template<typename PointT> inline int
write (const std::string &file_name,
const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
bool binary = false)
{
if (binary)
template<typename PointT> int
savePCDFile (const std::string &file_name,
const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const pcl::Indices &indices,
const bool binary_mode = false)
{
// Save the data
#include <boost/predef/other/endian.h>
-#include <algorithm>
#include <cstddef>
+#include <utility> // for swap
namespace pcl
{
#pragma once
-#include <pcl/io/boost.h>
#include <pcl/io/ply/byte_order.h>
+#include <cstdint> // for int8_t, int16_t, ...
/** \file ply.h contains standard typedefs and generic type traits
* \author Ares Lagae as part of libply, Nizar Sallem
#pragma once
-#include <pcl/io/boost.h>
#include <pcl/io/ply/ply.h>
#include <pcl/io/ply/io_operators.h>
#include <pcl/pcl_macros.h>
#include <string>
#include <tuple>
#include <vector>
+#include <boost/lexical_cast.hpp> // for lexical_cast
+#include <boost/mpl/fold.hpp> // for fold
+#include <boost/mpl/inherit.hpp> // for inherit
+#include <boost/mpl/inherit_linearly.hpp> // for inherit_linearly
+#include <boost/mpl/joint_view.hpp> // for joint_view
+#include <boost/mpl/transform.hpp> // for transform
+#include <boost/mpl/vector.hpp> // for vector
namespace pcl
{
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/io/boost.h>
#include <pcl/io/file_io.h>
#include <pcl/io/ply/ply_parser.h>
#include <pcl/PolygonMesh.h>
*/
template<typename PointT> int
savePLYFile (const std::string &file_name, const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices, bool binary_mode = false)
+ const pcl::Indices &indices, bool binary_mode = false)
{
// Copy indices to a new point cloud
pcl::PointCloud<PointT> cloud_out;
#include <thread>
#include <mutex>
-#include <pcl/io/boost.h>
#include <pcl/io/grabber.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
RealSense2Grabber ( const std::string& file_name_or_serial_number = "", const bool repeat_playback = true );
/** \brief virtual Destructor inherited from the Grabber interface. It never throws. */
- virtual ~RealSense2Grabber () noexcept;
+ ~RealSense2Grabber ();
/** \brief Set the device options
* \param[in] width resolution
#include <pcl/point_cloud.h>
#include <pcl/memory.h>
#include <boost/asio.hpp>
+#include <boost/shared_array.hpp> // for shared_array
#include <memory>
#include <thread>
#include <pcl/TextureMesh.h>
#include <pcl/pcl_macros.h>
#include <pcl/conversions.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/range_image/range_image_planar.h>
// Ignore warnings in the above headers
#ifdef __GNUC__
#pragma GCC system_header
#endif
-#include <vtkVersion.h>
#include <vtkSmartPointer.h>
#include <vtkStructuredGrid.h>
#include <vtkPoints.h>
#include <istream>
#include <fstream>
#include <boost/filesystem.hpp>
+#include <boost/lexical_cast.hpp> // for lexical_cast
+#include <boost/algorithm/string.hpp> // for split
#include <cstdint>
//////////////////////////////////////////////////////////////////////////////
result = pcl::io::loadOBJFile (file_name, blob);
else
{
- PCL_ERROR ("[pcl::io::load] Don't know how to handle file with extension %s", extension.c_str ());
+ PCL_ERROR ("[pcl::io::load] Don't know how to handle file with extension %s\n", extension.c_str ());
result = -1;
}
return (result);
result = pcl::io::loadOBJFile (file_name, mesh);
else
{
- PCL_ERROR ("[pcl::io::load] Don't know how to handle file with extension %s", extension.c_str ());
+ PCL_ERROR ("[pcl::io::load] Don't know how to handle file with extension %s\n", extension.c_str ());
result = -1;
}
return (result);
result = pcl::io::loadOBJFile (file_name, mesh);
else
{
- PCL_ERROR ("[pcl::io::load] Don't know how to handle file with extension %s", extension.c_str ());
+ PCL_ERROR ("[pcl::io::load] Don't know how to handle file with extension %s\n", extension.c_str ());
result = -1;
}
return (result);
result = pcl::io::saveVTKFile (file_name, blob, precision);
else
{
- PCL_ERROR ("[pcl::io::save] Don't know how to handle file with extension %s", extension.c_str ());
+ PCL_ERROR ("[pcl::io::save] Don't know how to handle file with extension %s\n", extension.c_str ());
result = -1;
}
return (result);
result = pcl::io::saveOBJFile (file_name, tex_mesh, precision);
else
{
- PCL_ERROR ("[pcl::io::save] Don't know how to handle file with extension %s", extension.c_str ());
+ PCL_ERROR ("[pcl::io::save] Don't know how to handle file with extension %s\n", extension.c_str ());
result = -1;
}
return (result);
result = pcl::io::saveVTKFile (file_name, poly_mesh, precision);
else
{
- PCL_ERROR ("[pcl::io::save] Don't know how to handle file with extension %s", extension.c_str ());
+ PCL_ERROR ("[pcl::io::save] Don't know how to handle file with extension %s\n", extension.c_str ());
result = -1;
}
return (result);
* POSSIBILITY OF SUCH DAMAGE.
*
*/
+#include <cstdlib> // for abs
+
#include <pcl/io/debayer.h>
#define AVG(a,b) static_cast<unsigned char>((int(a) + int(b)) >> 1)
, sync_packet_size_ (512)
, fov_ (64. * M_PI / 180.)
, context_ (nullptr)
+ , device_handle_ (nullptr)
, bulk_ep_ (std::numeric_limits<unsigned char>::max ())
, second_image_ (false)
, running_ (false)
for (int n = 0; n < cams.count (); ++n)
{
+#if NXLIB_VERSION_MAJOR > 2
PCL_INFO ("%s %s %s\n", cams[n][itmSerialNumber].asString ().c_str (),
cams[n][itmModelName].asString ().c_str (),
- cams[n][itmStatus].asString ().c_str ());
+ cams[n][itmStatus][itmOpen].asBool()
+ ? "Open"
+ : (cams[n][itmStatus][itmAvailable].asBool() ? "Available" : "In Use"));
+#else
+ PCL_INFO ("%s %s %s\n", cams[n][itmSerialNumber].asString().c_str(),
+ cams[n][itmModelName].asString().c_str(),
+ cams[n][itmStatus].asString().c_str());
+#endif
}
PCL_INFO ("\n");
}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2013-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <pcl/io/file_io.h>
-
#include <thread>
#include <pcl/console/print.h>
-#include <pcl/io/boost.h>
#include <pcl/io/hdl_grabber.h>
#include <boost/version.hpp>
#include <boost/foreach.hpp>
*/
#include <fstream>
-#include <pcl/io/boost.h>
#include <pcl/common/io.h>
#include <pcl/io/ifs_io.h>
#include <pcl/console/time.h>
#include <cstring>
#include <cerrno>
+#include <boost/filesystem.hpp> // for exists
+#include <boost/iostreams/device/mapped_file.hpp> // for mapped_file_source
///////////////////////////////////////////////////////////////////////////////////////////
int
#include <pcl/pcl_config.h>
#include <pcl/io/image_depth.h>
-#include <sstream>
#include <limits>
-#include <iostream>
#include <pcl/io/io_exception.h>
#include <pcl/for_each_type.h>
#include <pcl/io/image_grabber.h>
#include <pcl/io/lzf_image_io.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/memory.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
+#include <boost/filesystem.hpp> // for exists, basename, is_directory, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
+#include <boost/date_time/posix_time/posix_time.hpp> // for posix_time
#ifdef PCL_BUILT_WITH_VTK
#include <vtkImageReader2.h>
if (!boost::filesystem::exists (dir) || !boost::filesystem::is_directory (dir))
{
PCL_ERROR ("[pcl::ImageGrabber::loadDepthAndRGBFiles] Error: attempted to instantiate a pcl::ImageGrabber from a path which"
- " is not a directory: %s", dir.c_str ());
+ " is not a directory: %s\n", dir.c_str ());
return;
}
std::string pathname;
if (!boost::filesystem::exists (depth_dir) || !boost::filesystem::is_directory (depth_dir))
{
PCL_ERROR ("[pcl::ImageGrabber::loadDepthAndRGBFiles] Error: attempted to instantiate a pcl::ImageGrabber from a path which"
- " is not a directory: %s", depth_dir.c_str ());
+ " is not a directory: %s\n", depth_dir.c_str ());
return;
}
if (!boost::filesystem::exists (rgb_dir) || !boost::filesystem::is_directory (rgb_dir))
{
PCL_ERROR ("[pcl::ImageGrabber::loadDepthAndRGBFiles] Error: attempted to instantiate a pcl::ImageGrabber from a path which"
- " is not a directory: %s", rgb_dir.c_str ());
+ " is not a directory: %s\n", rgb_dir.c_str ());
return;
}
std::string pathname;
}
}
if (depth_image_files_.size () != rgb_image_files_.size () )
- PCL_WARN ("[pcl::ImageGrabberBase::ImageGrabberImpl::loadDepthAndRGBFiles] : Watch out not same amount of depth and rgb images");
+ PCL_WARN ("[pcl::ImageGrabberBase::ImageGrabberImpl::loadDepthAndRGBFiles] : Number of depth images does not match the number of RGB images\n");
if (!depth_image_files_.empty ())
sort (depth_image_files_.begin (), depth_image_files_.end ());
else
- PCL_ERROR ("[pcl::ImageGrabberBase::ImageGrabberImpl::loadDepthAndRGBFiles] : no depth images added");
+ PCL_ERROR ("[pcl::ImageGrabberBase::ImageGrabberImpl::loadDepthAndRGBFiles] : no depth images added\n");
if (!rgb_image_files_.empty ())
sort (rgb_image_files_.begin (), rgb_image_files_.end ());
else
- PCL_ERROR ("[pcl::ImageGrabberBase::ImageGrabberImpl::loadDepthAndRGBFiles] : no rgb images added");
+ PCL_ERROR ("[pcl::ImageGrabberBase::ImageGrabberImpl::loadDepthAndRGBFiles] : no rgb images added\n");
}
if (!boost::filesystem::exists (dir) || !boost::filesystem::is_directory (dir))
{
PCL_ERROR ("[pcl::ImageGrabber::loadPCLZFFiles] Error: attempted to instantiate a pcl::ImageGrabber from a path which"
- " is not a directory: %s", dir.c_str ());
+ " is not a directory: %s\n", dir.c_str ());
return;
}
std::string pathname;
cloud_color.width = dims[0];
cloud_color.height = dims[1];
cloud_color.is_dense = false;
- cloud_color.points.resize (depth_image->GetNumberOfPoints ());
+ cloud_color.resize (depth_image->GetNumberOfPoints ());
for (int y = 0; y < dims[1]; ++y)
{
cloud.width = dims[0];
cloud.height = dims[1];
cloud.is_dense = false;
- cloud.points.resize (depth_image->GetNumberOfPoints ());
+ cloud.resize (depth_image->GetNumberOfPoints ());
for (int y = 0; y < dims[1]; ++y)
{
for (int x = 0; x < dims[0]; ++x, ++depth_pixel)
#include <pcl/pcl_config.h>
#include <pcl/io/image_ir.h>
-#include <sstream>
-#include <limits>
-#include <iostream>
-
#include <pcl/io/io_exception.h>
using pcl::io::FrameWrapper;
#include <pcl/io/obj_io.h>
#include <fstream>
#include <pcl/common/io.h>
-#include <pcl/io/boost.h>
#include <pcl/console/time.h>
+#include <boost/lexical_cast.hpp> // for lexical_cast
+#include <boost/filesystem.hpp> // for exists
+#include <boost/algorithm/string.hpp> // for split
pcl::MTLReader::MTLReader ()
{
{
if (fillRGBfromXYZ (st, *rgb))
{
- PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to RGB values",
+ PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to RGB values\n",
line.c_str ());
mtl_file.close ();
materials_.clear ();
{
if (fillRGBfromRGB (st, *rgb))
{
- PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to RGB values",
+ PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to RGB values\n",
line.c_str ());
mtl_file.close ();
materials_.clear ();
}
catch (boost::bad_lexical_cast &)
{
- PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to illumination model",
+ PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to illumination model\n",
line.c_str ());
mtl_file.close ();
materials_.clear ();
}
catch (boost::bad_lexical_cast &)
{
- PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to transparency value",
+ PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to transparency value\n",
line.c_str ());
mtl_file.close ();
materials_.clear ();
}
catch (boost::bad_lexical_cast &)
{
- PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to shininess value",
+ PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to shininess value\n",
line.c_str ());
mtl_file.close ();
materials_.clear ();
std::vector<std::string> st;
try
{
- std::size_t point_idx = 0;
- std::size_t normal_idx = 0;
+ uindex_t point_idx = 0;
+ uindex_t normal_idx = 0;
while (!fs.eof ())
{
}
catch (const boost::bad_lexical_cast&)
{
- PCL_ERROR ("Unable to convert %s to vertex coordinates!", line.c_str ());
+ PCL_ERROR ("Unable to convert %s to vertex coordinates!\n", line.c_str ());
return (-1);
}
continue;
}
catch (const boost::bad_lexical_cast&)
{
- PCL_ERROR ("Unable to convert line %s to vertex normal!", line.c_str ());
+ PCL_ERROR ("Unable to convert line %s to vertex normal!\n", line.c_str ());
return (-1);
}
continue;
}
catch (const boost::bad_lexical_cast&)
{
- PCL_ERROR ("Unable to convert %s to vertex coordinates!", line.c_str ());
+ PCL_ERROR ("Unable to convert %s to vertex coordinates!\n", line.c_str ());
return (-1);
}
continue;
}
catch (const boost::bad_lexical_cast&)
{
- PCL_ERROR ("Unable to convert line %s to vertex normal!", line.c_str ());
+ PCL_ERROR ("Unable to convert line %s to vertex normal!\n", line.c_str ());
return (-1);
}
continue;
}
catch (const boost::bad_lexical_cast&)
{
- PCL_ERROR ("Unable to convert line %s to texture coordinates!", line.c_str ());
+ PCL_ERROR ("Unable to convert line %s to texture coordinates!\n", line.c_str ());
return (-1);
}
continue;
}
catch (const boost::bad_lexical_cast&)
{
- PCL_ERROR ("Unable to convert %s to vertex coordinates!", line.c_str ());
+ PCL_ERROR ("Unable to convert %s to vertex coordinates!\n", line.c_str ());
return (-1);
}
continue;
}
catch (const boost::bad_lexical_cast&)
{
- PCL_ERROR ("Unable to convert line %s to vertex normal!", line.c_str ());
+ PCL_ERROR ("Unable to convert line %s to vertex normal!\n", line.c_str ());
return (-1);
}
continue;
#include <pcl/point_types.h>
#include <pcl/common/time.h>
#include <pcl/console/print.h>
-#include <pcl/io/boost.h> // for boost::shared_array
+#include <boost/shared_array.hpp> // for boost::shared_array
#include <pcl/memory.h> // for dynamic_pointer_cast
#include <pcl/exceptions.h>
// check if we have color point cloud slots
if (point_cloud_rgb_signal_->num_slots () > 0)
{
- PCL_WARN ("PointXYZRGB callbacks deprecated. Use PointXYZRGBA instead.");
+ PCL_WARN ("PointXYZRGB callbacks deprecated. Use PointXYZRGBA instead.\n");
point_cloud_rgb_signal_->operator() (convertToXYZRGBPointCloud (image, depth_image));
}
if ( (mode.x_resolution_ == 640) && (mode.y_resolution_ == 480) && (mode.frame_rate_ == 30.0) )
return mode;
}
+ if (modeList.empty())
+ THROW_IO_EXCEPTION("Device claims to have an IR sensor, but doesn't have any IR streaming mode");
return (modeList.at (0)); // Return first mode if we can't find VGA
}
if ( (mode.x_resolution_ == 640) && (mode.y_resolution_ == 480) && (mode.frame_rate_ == 30.0) )
return mode;
}
+ if (modeList.empty())
+ THROW_IO_EXCEPTION("Device claims to have a color sensor, but doesn't have any color streaming mode");
return (modeList.at (0)); // Return first mode if we can't find VGA
}
if ( (mode.x_resolution_ == 640) && (mode.y_resolution_ == 480) && (mode.frame_rate_ == 30.0) )
return mode;
}
+ if (modeList.empty())
+ THROW_IO_EXCEPTION("Device claims to have a depth sensor, but doesn't have any depth streaming mode");
return (modeList.at (0)); // Return first mode if we can't find VGA
}
#include <pcl/point_types.h>
#include <pcl/common/time.h>
#include <pcl/console/print.h>
-#include <pcl/io/boost.h>
#include <pcl/exceptions.h>
#include <iostream>
+#include <boost/filesystem.hpp> // for exists
using namespace pcl::io::openni2;
#include <pcl/io/openni_camera/openni_device_primesense.h>
#include <pcl/io/openni_camera/openni_image_yuv_422.h>
-#include <pcl/io/boost.h>
#include <iostream>
#include <mutex>
#endif
#include <pcl/io/openni_camera/openni_device_xtion.h>
-#include <pcl/io/boost.h>
#include <mutex>
#include <sstream>
#ifndef _WIN32
#include <libusb-1.0/libusb.h>
-#else
-#include <pcl/io/boost.h>
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <pcl/point_types.h>
#include <pcl/common/time.h>
#include <pcl/console/print.h>
-#include <pcl/io/boost.h>
#include <pcl/exceptions.h>
#include <iostream>
#include <thread>
+#include <boost/filesystem.hpp> // for exists
using namespace std::chrono_literals;
#include <fcntl.h>
#include <string>
#include <cstdlib>
-#include <pcl/io/boost.h>
#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/common/io.h>
#include <pcl/io/low_level_io.h>
#include <cstring>
#include <cerrno>
+#include <boost/filesystem.hpp> // for permissions
+#include <boost/algorithm/string.hpp> // for split
///////////////////////////////////////////////////////////////////////////////////////////
void
{
// Get the number of points the cloud should have
unsigned int nr_points = cloud.width * cloud.height;
+ // The number of elements each line/point should have
+ const unsigned int elems_per_line = std::accumulate (cloud.fields.cbegin (), cloud.fields.cend (), 0u,
+ [](const auto& i, const auto& field){ return (i + field.count); });
+ PCL_DEBUG ("[pcl::PCDReader::readBodyASCII] Will check that each line in the PCD file has %u elements.\n", elems_per_line);
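+ // For instance (hypothetical layout): a cloud with fields x, y, z (count 1 each) and
+ // rgb (count 1) gives elems_per_line = 4, so every ASCII line must carry exactly
+ // 4 whitespace-separated tokens.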
// Setting the is_dense property to true by default
cloud.is_dense = true;
unsigned int idx = 0;
std::string line;
std::vector<std::string> st;
+ std::istringstream is;
+ is.imbue (std::locale::classic ());
try
{
boost::trim (line);
boost::split (st, line, boost::is_any_of ("\t\r "), boost::token_compress_on);
+ if (st.size () != elems_per_line) // If this is not checked, an exception might occur while accessing st
+ {
+ PCL_WARN ("[pcl::PCDReader::readBodyASCII] Possibly malformed PCD file: point number %u has %zu elements, but should have %u\n",
+ idx+1, st.size (), elems_per_line);
+ ++idx; // Skip this line/point, but read all others
+ continue;
+ }
+
if (idx >= nr_points)
{
PCL_WARN ("[pcl::PCDReader::read] input has more points (%d) than advertised (%d)!\n", idx, nr_points);
total += cloud.fields[d].count; // jump over this many elements in the string token
continue;
}
- for (unsigned int c = 0; c < cloud.fields[d].count; ++c)
+ for (uindex_t c = 0; c < cloud.fields[d].count; ++c)
{
switch (cloud.fields[d].datatype)
{
case pcl::PCLPointField::INT8:
{
copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT8>::type> (
- st.at (total + c), cloud, idx, d, c);
+ st.at (total + c), cloud, idx, d, c, is);
break;
}
case pcl::PCLPointField::UINT8:
{
copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT8>::type> (
- st.at (total + c), cloud, idx, d, c);
+ st.at (total + c), cloud, idx, d, c, is);
break;
}
case pcl::PCLPointField::INT16:
{
copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT16>::type> (
- st.at (total + c), cloud, idx, d, c);
+ st.at (total + c), cloud, idx, d, c, is);
break;
}
case pcl::PCLPointField::UINT16:
{
copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT16>::type> (
- st.at (total + c), cloud, idx, d, c);
+ st.at (total + c), cloud, idx, d, c, is);
break;
}
case pcl::PCLPointField::INT32:
{
copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT32>::type> (
- st.at (total + c), cloud, idx, d, c);
+ st.at (total + c), cloud, idx, d, c, is);
break;
}
case pcl::PCLPointField::UINT32:
{
copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT32>::type> (
- st.at (total + c), cloud, idx, d, c);
+ st.at (total + c), cloud, idx, d, c, is);
break;
}
case pcl::PCLPointField::FLOAT32:
{
copyStringValue<pcl::traits::asType<pcl::PCLPointField::FLOAT32>::type> (
- st.at (total + c), cloud, idx, d, c);
+ st.at (total + c), cloud, idx, d, c, is);
break;
}
case pcl::PCLPointField::FLOAT64:
{
copyStringValue<pcl::traits::asType<pcl::PCLPointField::FLOAT64>::type> (
- st.at (total + c), cloud, idx, d, c);
+ st.at (total + c), cloud, idx, d, c, is);
break;
}
default:
toff += fields_sizes[i] * cloud.width * cloud.height;
}
// Copy it to the cloud
- for (std::size_t i = 0; i < cloud.width * cloud.height; ++i)
+ for (uindex_t i = 0; i < cloud.width * cloud.height; ++i)
{
for (std::size_t j = 0; j < pters.size (); ++j)
{
// Extra checks (not needed for ASCII)
int point_size = static_cast<int> (cloud.data.size () / (cloud.height * cloud.width));
// Once copied, we need to go over each field and check if it has NaN/Inf values and assign cloud.is_dense to true or false
- for (std::uint32_t i = 0; i < cloud.width * cloud.height; ++i)
+ for (uindex_t i = 0; i < cloud.width * cloud.height; ++i)
{
for (unsigned int d = 0; d < static_cast<unsigned int> (cloud.fields.size ()); ++d)
{
- for (std::uint32_t c = 0; c < cloud.fields[d].count; ++c)
+ for (uindex_t c = 0; c < cloud.fields[d].count; ++c)
{
switch (cloud.fields[d].datatype)
{
std::stringstream field_names, field_types, field_sizes, field_counts;
// Check if the size of the fields is smaller than the size of the point step
- unsigned int toffset = 0;
+ std::size_t toffset = 0;
for (std::size_t i = 0; i < cloud.fields.size (); ++i)
{
// If field offsets do not match, then we need to create fake fields
std::ofstream fs;
fs.precision (precision);
fs.imbue (std::locale::classic ());
- fs.open (file_name.c_str ()); // Open file
+ fs.open (file_name.c_str (), std::ios::binary); // Open file
if (!fs.is_open () || fs.fail ())
{
PCL_ERROR("[pcl::PCDWriter::writeASCII] Could not open file '%s' for writing! Error : %s\n", file_name.c_str (), strerror(errno));
}
// Go over all the points, and copy the data in the appropriate places
- for (std::size_t i = 0; i < cloud.width * cloud.height; ++i)
+ for (uindex_t i = 0; i < cloud.width * cloud.height; ++i)
{
for (std::size_t j = 0; j < pters.size (); ++j)
{
#include <pcl/point_types.h>
#include <pcl/common/io.h>
#include <pcl/io/ply_io.h>
-#include <pcl/io/boost.h>
#include <cstdlib>
#include <fstream>
// https://www.boost.org/doc/libs/1_70_0/libs/filesystem/doc/index.htm#Coding-guidelines
#define BOOST_FILESYSTEM_NO_DEPRECATED
#include <boost/filesystem.hpp>
+#include <boost/algorithm/string.hpp> // for split
namespace fs = boost::filesystem;
pcl::io::ply::ply_parser::list_property_definition_callbacks_type list_property_definition_callbacks;
pcl::io::ply::ply_parser::at<pcl::io::ply::uint8, pcl::io::ply::int32> (list_property_definition_callbacks) = [this] (const std::string& element_name, const std::string& property_name) { return listPropertyDefinitionCallback<pcl::io::ply::uint8, pcl::io::ply::int32> (element_name, property_name); };
+ pcl::io::ply::ply_parser::at<pcl::io::ply::uint8, pcl::io::ply::uint32> (list_property_definition_callbacks) = [this] (const std::string& element_name, const std::string& property_name) { return listPropertyDefinitionCallback<pcl::io::ply::uint8, pcl::io::ply::uint32> (element_name, property_name); };
pcl::io::ply::ply_parser::at<pcl::io::ply::uint32, pcl::io::ply::float64> (list_property_definition_callbacks) = [this] (const std::string& element_name, const std::string& property_name) { return listPropertyDefinitionCallback<pcl::io::ply::uint32, pcl::io::ply::float64> (element_name, property_name); };
pcl::io::ply::ply_parser::at<pcl::io::ply::uint32, pcl::io::ply::float32> (list_property_definition_callbacks) = [this] (const std::string& element_name, const std::string& property_name) { return listPropertyDefinitionCallback<pcl::io::ply::uint32, pcl::io::ply::float32> (element_name, property_name); };
pcl::io::ply::ply_parser::at<pcl::io::ply::uint32, pcl::io::ply::uint32> (list_property_definition_callbacks) = [this] (const std::string& element_name, const std::string& property_name) { return listPropertyDefinitionCallback<pcl::io::ply::uint32, pcl::io::ply::uint32> (element_name, property_name); };
case pcl::PCLPointField::FLOAT64 : oss << " double "; break;
default :
{
- PCL_ERROR ("[pcl::PLYWriter::generateHeader] unknown data field type!");
+ PCL_ERROR ("[pcl::PLYWriter::generateHeader] unknown data field type!\n");
return ("");
}
}
}
}
+ // VTK requires a face element entry in order to load PLY files
+ oss << "\nelement face 0";
+
if (use_camera)
{
oss << "\nelement camera 1"
*
*/
-#include <pcl/io/eigen.h>
-#include <pcl/io/boost.h>
#include <pcl/io/grabber.h>
#include <pcl/io/io_exception.h>
#include <pcl/point_cloud.h>
pcl::PointCloud<pcl::PointXYZ>::Ptr
RealSense2Grabber::convertDepthToPointXYZ ( const rs2::points& points )
{
- return convertRealsensePointsToPointCloud<pcl::PointXYZ> ( points, []( pcl::PointXYZ& p, const rs2::texture_coordinate* uvptr ) {} );
+ return convertRealsensePointsToPointCloud<pcl::PointXYZ> ( points, []( pcl::PointXYZ&, const rs2::texture_coordinate*) {} );
}
pcl::PointCloud<pcl::PointXYZRGB>::Ptr
cloud->width = sp.width ();
cloud->height = sp.height ();
cloud->is_dense = false;
- cloud->points.resize ( size () );
+ cloud->resize ( points.size () );
const auto cloud_vertices_ptr = points.get_vertices ();
const auto cloud_texture_ptr = points.get_texture_coordinates ();
+#if OPENMP_LEGACY_CONST_DATA_SHARING_RULE
#pragma omp parallel for \
default(none) \
- shared(cloud, cloud_vertices_ptr, mapColorFunc)
- for (int index = 0; index < cloud->size (); ++index)
+ shared(cloud, mapColorFunc)
+#else
+#pragma omp parallel for \
+ default(none) \
+ shared(cloud, cloud_texture_ptr, cloud_vertices_ptr, mapColorFunc)
+#endif
+ for (std::size_t index = 0; index < cloud->size (); ++index)
{
const auto ptr = cloud_vertices_ptr + index;
const auto uvptr = cloud_texture_ptr + index;
{
if (threshold > 15)
{
- PCL_WARN ("[pcl::RealSenseGrabber::setConfidenceThreshold] Attempted to set threshold outside valid range (0-15)");
+ PCL_WARN ("[pcl::RealSenseGrabber::setConfidenceThreshold] Attempted to set threshold outside valid range (0-15)\n");
}
else
{
*
*/
+#include <pcl/io/pcd_io.h> // for loadPCDFile, savePCDFile
#include <pcl/io/vtk_lib_io.h>
#include <pcl/io/impl/vtk_lib_io.hpp>
#include <pcl/PCLPointCloud2.h>
-#include <vtkVersion.h>
#include <vtkCellArray.h>
#include <vtkCellData.h>
#include <vtkDoubleArray.h>
mesh.cloud.width = mesh.cloud.height = 0;
mesh.cloud.is_dense = true;
+ if (poly_data->GetPoints () == nullptr)
+ {
+ PCL_ERROR ("[pcl::io::vtk2mesh] Given vtkPolyData is malformed (contains a null pointer instead of points).\n");
+ return (0);
+ }
vtkSmartPointer<vtkPoints> mesh_points = poly_data->GetPoints ();
vtkIdType nr_points = mesh_points->GetNumberOfPoints ();
vtkIdType nr_polygons = poly_data->GetNumberOfPolys ();
// Now handle the polygons
mesh.polygons.resize (nr_polygons);
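+ // VTK 9 (which defines VTK_CELL_ARRAY_V2) hands back the cell connectivity through a
+ // pointer-to-const, so the declaration below differs between VTK versions.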
+#ifdef VTK_CELL_ARRAY_V2
+ vtkIdType const *cell_points;
+#else
vtkIdType* cell_points;
+#endif
vtkIdType nr_cell_points;
vtkCellArray * mesh_polygons = poly_data->GetPolys ();
mesh_polygons->InitTraversal ();
int
pcl::io::mesh2vtk (const pcl::PolygonMesh& mesh, vtkSmartPointer<vtkPolyData>& poly_data)
{
- unsigned int nr_points = mesh.cloud.width * mesh.cloud.height;
+ auto nr_points = mesh.cloud.width * mesh.cloud.height;
unsigned int nr_polygons = static_cast<unsigned int> (mesh.polygons.size ());
// reset vtkPolyData object
colors->SetName ("Colors");
pcl::RGB rgb;
int offset = (idx_rgb != -1) ? mesh.cloud.fields[idx_rgb].offset : mesh.cloud.fields[idx_rgba].offset;
- for (vtkIdType cp = 0; cp < nr_points; ++cp)
+ for (pcl::uindex_t cp = 0; cp < nr_points; ++cp)
{
memcpy (&rgb, &mesh.cloud.data[cp * mesh.cloud.point_step + offset], sizeof (RGB));
const unsigned char color[3] = {rgb.r, rgb.g, rgb.b};
vtkSmartPointer<vtkFloatArray> normals = vtkSmartPointer<vtkFloatArray>::New ();
normals->SetNumberOfComponents (3);
float nx = 0.0f, ny = 0.0f, nz = 0.0f;
- for (vtkIdType cp = 0; cp < nr_points; ++cp)
+ for (pcl::uindex_t cp = 0; cp < nr_points; ++cp)
{
memcpy (&nx, &mesh.cloud.data[cp*mesh.cloud.point_step+mesh.cloud.fields[idx_normal_x].offset], sizeof (float));
memcpy (&ny, &mesh.cloud.data[cp*mesh.cloud.point_step+mesh.cloud.fields[idx_normal_y].offset], sizeof (float));
vtkSmartPointer<vtkCellArray> cloud_vertices = vtkSmartPointer<vtkCellArray>::New ();
vtkIdType pid[1];
- for (std::size_t point_idx = 0; point_idx < cloud->width * cloud->height; point_idx ++)
+ for (uindex_t point_idx = 0; point_idx < cloud->width * cloud->height; point_idx ++)
{
float point[3];
- int point_offset = (int (point_idx) * cloud->point_step);
- int offset = point_offset + cloud->fields[x_idx].offset;
+ auto point_offset = (point_idx * cloud->point_step);
+ auto offset = point_offset + cloud->fields[x_idx].offset;
memcpy (&point, &cloud->data[offset], sizeof (float)*3);
pid[0] = cloud_points->InsertNextPoint (point);
colors->SetNumberOfComponents (3);
colors->SetName ("rgb");
- for (std::size_t point_idx = 0; point_idx < cloud->width * cloud->height; point_idx ++)
+ for (uindex_t point_idx = 0; point_idx < cloud->width * cloud->height; point_idx ++)
{
unsigned char bgr[3];
- int point_offset = (int (point_idx) * cloud->point_step);
- int offset = point_offset + cloud->fields[rgb_idx].offset;
+ auto point_offset = (point_idx * cloud->point_step);
+ auto offset = point_offset + cloud->fields[rgb_idx].offset;
memcpy (&bgr, &cloud->data[offset], sizeof (unsigned char)*3);
colors->InsertNextTuple3(bgr[2], bgr[1], bgr[0]);
cloud_intensity->SetNumberOfComponents (1);
cloud_intensity->SetName("intensity");
- for (std::size_t point_idx = 0; point_idx < cloud->width * cloud->height; point_idx ++)
+ for (uindex_t point_idx = 0; point_idx < cloud->width * cloud->height; point_idx ++)
{
float intensity;
- int point_offset = (int (point_idx) * cloud->point_step);
- int offset = point_offset + cloud->fields[intensity_idx].offset;
+ auto point_offset = (point_idx * cloud->point_step);
+ auto offset = point_offset + cloud->fields[intensity_idx].offset;
memcpy (&intensity, &cloud->data[offset], sizeof(float));
cloud_intensity->InsertNextValue(intensity);
normals->SetNumberOfComponents(3); //3d normals (ie x,y,z)
normals->SetName("normals");
- for (std::size_t point_idx = 0; point_idx < cloud->width * cloud->height; point_idx ++)
+ for (uindex_t point_idx = 0; point_idx < cloud->width * cloud->height; point_idx ++)
{
float normal[3];
- int point_offset = (int (point_idx) * cloud->point_step);
- int offset = point_offset + cloud->fields[normal_x_idx].offset;
+ auto point_offset = (point_idx * cloud->point_step);
+ auto offset = point_offset + cloud->fields[normal_x_idx].offset;
memcpy (&normal, &cloud->data[offset], sizeof (float)*3);
normals->InsertNextTuple(normal);
#include <pcl/io/openni_grabber.h>
#include <pcl/common/time.h>
#include <pcl/console/parse.h>
+#include <iomanip> // for setprecision
class SimpleOpenNIProcessor
{
#include <pcl/point_types.h>
#include <pcl/io/openni_grabber.h>
#include <boost/circular_buffer.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp> // for to_iso_string, local_time
#include <pcl/io/pcd_io.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
*/
#include <pcl/io/pcd_io.h>
+#include <boost/lexical_cast.hpp> // for lexical_cast
/** @brief PCL point object */
using PointT = pcl::PointXYZRGBA;
set(incs
"include/pcl/${SUBSYS_NAME}/kdtree.h"
"include/pcl/${SUBSYS_NAME}/io.h"
- "include/pcl/${SUBSYS_NAME}/flann.h"
"include/pcl/${SUBSYS_NAME}/kdtree_flann.h"
)
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#pragma once
-
-#include <pcl/pcl_macros.h>
-
-PCL_DEPRECATED_HEADER(1, 12, "")
-
-#if defined _MSC_VER
-# pragma warning(disable: 4267 4244)
-#endif
-
-#include <flann/flann.hpp>
-
-#if defined _MSC_VER
-# pragma warning(default: 4267)
-#endif
pcl::getApproximateIndices (
const typename pcl::PointCloud<Point1T>::ConstPtr &cloud_in,
const typename pcl::PointCloud<Point2T>::ConstPtr &cloud_ref,
- std::vector<int> &indices)
+ Indices &indices)
{
pcl::KdTreeFLANN<Point2T> tree;
tree.setInputCloud (cloud_ref);
- std::vector<int> nn_idx (1);
+ Indices nn_idx (1);
std::vector<float> nn_dists (1);
indices.resize (cloud_in->size ());
for (std::size_t i = 0; i < cloud_in->size (); ++i)
pcl::getApproximateIndices (
const typename pcl::PointCloud<PointT>::ConstPtr &cloud_in,
const typename pcl::PointCloud<PointT>::ConstPtr &cloud_ref,
- std::vector<int> &indices)
+ Indices &indices)
{
pcl::KdTreeFLANN<PointT> tree;
tree.setInputCloud (cloud_ref);
- std::vector<int> nn_idx (1);
+ Indices nn_idx (1);
std::vector<float> nn_dists (1);
indices.resize (cloud_in->size ());
for (std::size_t i = 0; i < cloud_in->size (); ++i)
, param_k_ (::flann::SearchParams (-1 , epsilon_))
, param_radius_ (::flann::SearchParams (-1, epsilon_, sorted))
{
+ if (!std::is_same<std::size_t, pcl::index_t>::value) {
+ const auto message = "FLANN is not optimized for current index type. Will incur "
+ "extra allocations and copy\n";
+ if (std::is_same<int, pcl::index_t>::value) {
+ PCL_DEBUG(message); // since this has been the default behavior till PCL 1.12
+ }
+ else {
+ PCL_WARN(message);
+ }
+ }
}
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename Dist>
-pcl::KdTreeFLANN<PointT, Dist>::KdTreeFLANN (const KdTreeFLANN<PointT, Dist> &k)
+pcl::KdTreeFLANN<PointT, Dist>::KdTreeFLANN (const KdTreeFLANN<PointT, Dist> &k)
: pcl::KdTree<PointT> (false)
, flann_index_ ()
, identity_mapping_ (false)
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename Dist> void
+template <typename PointT, typename Dist> void
pcl::KdTreeFLANN<PointT, Dist>::setEpsilon (float eps)
{
epsilon_ = eps;
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename Dist> void
+template <typename PointT, typename Dist> void
pcl::KdTreeFLANN<PointT, Dist>::setSortedResults (bool sorted)
{
sorted_ = sorted;
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename Dist> void
+template <typename PointT, typename Dist> void
pcl::KdTreeFLANN<PointT, Dist>::setInputCloud (const PointCloudConstPtr &cloud, const IndicesConstPtr &indices)
{
cleanup (); // Perform an automatic cleanup of structures
input_ = cloud;
indices_ = indices;
-
+
// Allocate enough data
if (!input_)
{
{
convertCloudToArray (*input_);
}
- total_nr_points_ = static_cast<int> (index_mapping_.size ());
+ total_nr_points_ = static_cast<uindex_t> (index_mapping_.size ());
if (total_nr_points_ == 0)
{
PCL_ERROR ("[pcl::KdTreeFLANN::setInputCloud] Cannot create a KDTree with an empty input cloud!\n");
return;
}
- flann_index_.reset (new FLANNIndex (::flann::Matrix<float> (cloud_.get (),
- index_mapping_.size (),
+ flann_index_.reset (new FLANNIndex (::flann::Matrix<float> (cloud_.get (),
+ index_mapping_.size (),
dim_),
::flann::KDTreeSingleIndexParams (15))); // max 15 points/leaf
flann_index_->buildIndex ();
}
///////////////////////////////////////////////////////////////////////////////////////////
+namespace pcl {
+namespace detail {
+// Replace using if constexpr in C++17
+template <class IndexT,
+ class A,
+ class B,
+ class C,
+ class D,
+ class F,
+ CompatWithFlann<IndexT> = true>
+int
+knn_search(A& index, B& query, C& k_indices, D& dists, unsigned int k, F& params)
+{
+ // Wrap k_indices vector (no data allocation)
+ ::flann::Matrix<index_t> k_indices_mat(&k_indices[0], 1, k);
+ return index.knnSearch(query, k_indices_mat, dists, k, params);
+}
+
+template <class IndexT,
+ class A,
+ class B,
+ class C,
+ class D,
+ class F,
+ NotCompatWithFlann<IndexT> = true>
+int
+knn_search(A& index, B& query, C& k_indices, D& dists, unsigned int k, F& params)
+{
+ std::vector<std::size_t> indices(k);
+ k_indices.resize(k);
+ // Wrap indices vector (no data allocation)
+ ::flann::Matrix<std::size_t> indices_mat(&indices[0], 1, k);
+ auto ret = index.knnSearch(query, indices_mat, dists, k, params);
+ std::copy(indices.cbegin(), indices.cend(), k_indices.begin());
+ return ret;
+}
+
+template <class IndexT, class A, class B, class F, CompatWithFlann<IndexT> = true>
+int
+knn_search(A& index,
+ B& query,
+ std::vector<Indices>& k_indices,
+ std::vector<std::vector<float>>& dists,
+ unsigned int k,
+ F& params)
+{
+ return index.knnSearch(query, k_indices, dists, k, params);
+}
+
+template <class IndexT, class A, class B, class F, NotCompatWithFlann<IndexT> = true>
+int
+knn_search(A& index,
+ B& query,
+ std::vector<Indices>& k_indices,
+ std::vector<std::vector<float>>& dists,
+ unsigned int k,
+ F& params)
+{
+ std::vector<std::vector<std::size_t>> indices;
+ // flann will resize accordingly
+ auto ret = index.knnSearch(query, indices, dists, k, params);
+
+ k_indices.resize(indices.size());
+ {
+ auto it = indices.cbegin();
+ auto jt = k_indices.begin();
+ for (; it != indices.cend(); ++it, ++jt) {
+ jt->resize(it->size());
+ std::copy(it->cbegin(), it->cend(), jt->begin());
+ }
+ }
+ return ret;
+}
+} // namespace detail
+template <class FlannIndex,
+ class Query,
+ class Indices,
+ class Distances,
+ class SearchParams>
+int
+knn_search(const FlannIndex& index,
+ const Query& query,
+ Indices& indices,
+ Distances& dists,
+ unsigned int k,
+ const SearchParams& params)
+{
+ return detail::knn_search<pcl::index_t>(index, query, indices, dists, k, params);
+}
+} // namespace pcl
+
template <typename PointT, typename Dist> int
-pcl::KdTreeFLANN<PointT, Dist>::nearestKSearch (const PointT &point, int k,
- std::vector<int> &k_indices,
+pcl::KdTreeFLANN<PointT, Dist>::nearestKSearch (const PointT &point, unsigned int k,
+ Indices &k_indices,
std::vector<float> &k_distances) const
{
assert (point_representation_->isValid (point) && "Invalid (NaN, Inf) point coordinates given to nearestKSearch!");
k_indices.resize (k);
k_distances.resize (k);
+ if (k==0)
+ return 0;
+
std::vector<float> query (dim_);
point_representation_->vectorize (static_cast<PointT> (point), query);
- ::flann::Matrix<int> k_indices_mat (&k_indices[0], 1, k);
+ // Wrap the k_distances vector (no data copy)
::flann::Matrix<float> k_distances_mat (&k_distances[0], 1, k);
- // Wrap the k_indices and k_distances vectors (no data copy)
- flann_index_->knnSearch (::flann::Matrix<float> (&query[0], 1, dim_),
- k_indices_mat, k_distances_mat,
- k, param_k_);
+
+ knn_search(*flann_index_,
+ ::flann::Matrix<float>(&query[0], 1, dim_),
+ k_indices,
+ k_distances_mat,
+ k,
+ param_k_);
// Do mapping to original point cloud
- if (!identity_mapping_)
+ if (!identity_mapping_)
{
for (std::size_t i = 0; i < static_cast<std::size_t> (k); ++i)
{
- int& neighbor_index = k_indices[i];
+ auto& neighbor_index = k_indices[i];
neighbor_index = index_mapping_[neighbor_index];
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename Dist> int
-pcl::KdTreeFLANN<PointT, Dist>::radiusSearch (const PointT &point, double radius, std::vector<int> &k_indices,
+namespace pcl {
+namespace detail {
+// Replace using if constexpr in C++17
+template <class IndexT,
+ class A,
+ class B,
+ class C,
+ class D,
+ class F,
+ CompatWithFlann<IndexT> = true>
+int
+radius_search(A& index, B& query, C& k_indices, D& dists, float radius, F& params)
+{
+ std::vector<pcl::Indices> indices(1);
+ int neighbors_in_radius = index.radiusSearch(query, indices, dists, radius, params);
+ k_indices = std::move(indices[0]);
+ return neighbors_in_radius;
+}
+
+template <class IndexT,
+ class A,
+ class B,
+ class C,
+ class D,
+ class F,
+ NotCompatWithFlann<IndexT> = true>
+int
+radius_search(A& index, B& query, C& k_indices, D& dists, float radius, F& params)
+{
+ std::vector<std::vector<std::size_t>> indices(1);
+ int neighbors_in_radius = index.radiusSearch(query, indices, dists, radius, params);
+ k_indices.resize(indices[0].size());
+ std::copy(indices[0].cbegin(), indices[0].cend(), k_indices.begin());
+ return neighbors_in_radius;
+}
+
+template <class IndexT, class A, class B, class F, CompatWithFlann<IndexT> = true>
+int
+radius_search(A& index,
+ B& query,
+ std::vector<Indices>& k_indices,
+ std::vector<std::vector<float>>& dists,
+ float radius,
+ F& params)
+{
+ return index.radiusSearch(query, k_indices, dists, radius, params);
+}
+
+template <class IndexT, class A, class B, class F, NotCompatWithFlann<IndexT> = true>
+int
+radius_search(A& index,
+ B& query,
+ std::vector<Indices>& k_indices,
+ std::vector<std::vector<float>>& dists,
+ float radius,
+ F& params)
+{
+ std::vector<std::vector<std::size_t>> indices;
+ // flann will resize accordingly
+ auto ret = index.radiusSearch(query, indices, dists, radius, params);
+
+ k_indices.resize(indices.size());
+ {
+ auto it = indices.cbegin();
+ auto jt = k_indices.begin();
+ for (; it != indices.cend(); ++it, ++jt) {
+ jt->resize(it->size());
+ std::copy(it->cbegin(), it->cend(), jt->begin());
+ }
+ }
+ return ret;
+}
+} // namespace detail
+template <class FlannIndex,
+ class Query,
+ class Indices,
+ class Distances,
+ class SearchParams>
+int
+radius_search(const FlannIndex& index,
+ const Query& query,
+ Indices& indices,
+ Distances& dists,
+ float radius,
+ const SearchParams& params)
+{
+ return detail::radius_search<pcl::index_t>(
+ index, query, indices, dists, radius, params);
+}
+} // namespace pcl
+
+template <typename PointT, typename Dist> int
+pcl::KdTreeFLANN<PointT, Dist>::radiusSearch (const PointT &point, double radius, Indices &k_indices,
std::vector<float> &k_sqr_dists, unsigned int max_nn) const
{
assert (point_representation_->isValid (point) && "Invalid (NaN, Inf) point coordinates given to radiusSearch!");
point_representation_->vectorize (static_cast<PointT> (point), query);
// Has max_nn been set properly?
- if (max_nn == 0 || max_nn > static_cast<unsigned int> (total_nr_points_))
+ if (max_nn == 0 || max_nn > total_nr_points_)
max_nn = total_nr_points_;
- std::vector<std::vector<int> > indices(1);
std::vector<std::vector<float> > dists(1);
::flann::SearchParams params (param_radius_);
- if (max_nn == static_cast<unsigned int>(total_nr_points_))
+ if (max_nn == total_nr_points_)
params.max_neighbors = -1; // return all neighbors in radius
else
params.max_neighbors = max_nn;
- int neighbors_in_radius = flann_index_->radiusSearch (::flann::Matrix<float> (&query[0], 1, dim_),
- indices,
- dists,
- static_cast<float> (radius * radius),
- params);
+ auto query_mat = ::flann::Matrix<float>(&query[0], 1, dim_);
+ int neighbors_in_radius = radius_search(*flann_index_,
+ query_mat,
+ k_indices,
+ dists,
+ static_cast<float>(radius * radius),
+ params);
- k_indices = indices[0];
k_sqr_dists = dists[0];
// Do mapping to original point cloud
- if (!identity_mapping_)
+ if (!identity_mapping_)
{
for (int i = 0; i < neighbors_in_radius; ++i)
{
- int& neighbor_index = k_indices[i];
+ auto& neighbor_index = k_indices[i];
neighbor_index = index_mapping_[neighbor_index];
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename Dist> void
+template <typename PointT, typename Dist> void
pcl::KdTreeFLANN<PointT, Dist>::cleanup ()
{
// Data array cleanup
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename Dist> void
+template <typename PointT, typename Dist> void
pcl::KdTreeFLANN<PointT, Dist>::convertCloudToArray (const PointCloud &cloud)
{
// No point in doing anything if the array is empty
- if (cloud.points.empty ())
+ if (cloud.empty ())
{
cloud_.reset ();
return;
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename Dist> void
-pcl::KdTreeFLANN<PointT, Dist>::convertCloudToArray (const PointCloud &cloud, const std::vector<int> &indices)
+template <typename PointT, typename Dist> void
+pcl::KdTreeFLANN<PointT, Dist>::convertCloudToArray (const PointCloud &cloud, const Indices &indices)
{
// No point in doing anything if the array is empty
- if (cloud.points.empty ())
+ if (cloud.empty ())
{
cloud_.reset ();
return;
float* cloud_ptr = cloud_.get ();
index_mapping_.reserve (original_no_of_points);
// its a subcloud -> false
- // true only identity:
+ // true only identity:
// - indices size equals cloud size
// - indices only contain values between 0 and cloud.size - 1
// - no index is multiple times in the list
// => index is complete
// But we can not guarantee that => identity_mapping_ = false
identity_mapping_ = false;
-
- for (const int &index : indices)
+
+ for (const auto &index : indices)
{
// Check if the point is invalid
if (!point_representation_->isValid (cloud[index]))
// map from 0 - N -> indices [0] - indices [N]
index_mapping_.push_back (index); // If the returned index should be for the indices vector
-
+
point_representation_->vectorize (cloud[index], cloud_ptr);
cloud_ptr += dim_;
}
template <typename PointT> void
getApproximateIndices (const typename pcl::PointCloud<PointT>::ConstPtr &cloud_in,
const typename pcl::PointCloud<PointT>::ConstPtr &cloud_ref,
- std::vector<int> &indices);
+ Indices &indices);
/** \brief Get a set of approximate indices for a given point cloud into a reference point cloud.
* The coordinates of the two point clouds can differ. The method uses an internal KdTree for
template <typename Point1T, typename Point2T> void
getApproximateIndices (const typename pcl::PointCloud<Point1T>::ConstPtr &cloud_in,
const typename pcl::PointCloud<Point2T>::ConstPtr &cloud_ref,
- std::vector<int> &indices);
+ Indices &indices);
}
#include <pcl/kdtree/impl/io.hpp>
#pragma once
-#include <climits>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
#include <pcl/point_representation.h>
-#include <pcl/common/io.h>
#include <pcl/common/copy_point.h>
namespace pcl
class KdTree
{
public:
- using IndicesPtr = shared_ptr<std::vector<int> >;
- using IndicesConstPtr = shared_ptr<const std::vector<int> >;
+ using IndicesPtr = shared_ptr<Indices >;
+ using IndicesConstPtr = shared_ptr<const Indices >;
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudPtr = typename PointCloud::Ptr;
using Ptr = shared_ptr<KdTree<PointT> >;
using ConstPtr = shared_ptr<const KdTree<PointT> >;
- /** \brief Empty constructor for KdTree. Sets some internal values to their defaults.
- * \param[in] sorted set to true if the application that the tree will be used for requires sorted nearest neighbor indices (default). False otherwise.
+ /** \brief Empty constructor for KdTree. Sets some internal values to their defaults.
+ * \param[in] sorted set to true if the application that the tree will be used for requires sorted nearest neighbor indices (default). False otherwise.
*/
KdTree (bool sorted = true) : input_(),
- epsilon_(0.0f), min_pts_(1), sorted_(sorted),
+ epsilon_(0.0f), min_pts_(1), sorted_(sorted),
point_representation_ (new DefaultPointRepresentation<PointT>)
{
};
return (input_);
}
- /** \brief Provide a pointer to the point representation to use to convert points into k-D vectors.
+ /** \brief Provide a pointer to the point representation to use to convert points into k-D vectors.
* \param[in] point_representation the const boost shared pointer to a PointRepresentation
*/
inline void
* \param[in] p_q the given query point
* \param[in] k the number of neighbors to search for
* \param[out] k_indices the resultant indices of the neighboring points (must be resized to \a k a priori!)
- * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
+ * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
* a priori!)
* \return number of neighbors found
*/
- virtual int
- nearestKSearch (const PointT &p_q, int k,
- std::vector<int> &k_indices, std::vector<float> &k_sqr_distances) const = 0;
+ virtual int
+ nearestKSearch (const PointT &p_q, unsigned int k,
+ Indices &k_indices, std::vector<float> &k_sqr_distances) const = 0;
/** \brief Search for k-nearest neighbors for the given query point.
- *
+ *
* \attention This method does not do any bounds checking for the input index
* (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
- *
+ *
* \param[in] cloud the point cloud data
* \param[in] index a \a valid index in \a cloud representing a \a valid (i.e., finite) query point
* \param[in] k the number of neighbors to search for
* \param[out] k_indices the resultant indices of the neighboring points (must be resized to \a k a priori!)
- * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
+ * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
* a priori!)
- *
+ *
* \return number of neighbors found
- *
+ *
* \exception asserts in debug mode if the index is not between 0 and the maximum number of points
*/
- virtual int
- nearestKSearch (const PointCloud &cloud, int index, int k,
- std::vector<int> &k_indices, std::vector<float> &k_sqr_distances) const
+ virtual int
+ nearestKSearch (const PointCloud &cloud, int index, unsigned int k,
+ Indices &k_indices, std::vector<float> &k_sqr_distances) const
{
assert (index >= 0 && index < static_cast<int> (cloud.size ()) && "Out-of-bounds error in nearestKSearch!");
return (nearestKSearch (cloud[index], k, k_indices, k_sqr_distances));
}
- /** \brief Search for k-nearest neighbors for the given query point.
+ /** \brief Search for k-nearest neighbors for the given query point.
* This method accepts a different template parameter for the point type.
* \param[in] point the given query point
* \param[in] k the number of neighbors to search for
* \param[out] k_indices the resultant indices of the neighboring points (must be resized to \a k a priori!)
- * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
+ * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
* a priori!)
* \return number of neighbors found
*/
- template <typename PointTDiff> inline int
- nearestKSearchT (const PointTDiff &point, int k,
- std::vector<int> &k_indices, std::vector<float> &k_sqr_distances) const
+ template <typename PointTDiff> inline int
+ nearestKSearchT (const PointTDiff &point, unsigned int k,
+ Indices &k_indices, std::vector<float> &k_sqr_distances) const
{
PointT p;
copyPoint (point, p);
}
/** \brief Search for k-nearest neighbors for the given query point (zero-copy).
- *
+ *
* \attention This method does not do any bounds checking for the input index
* (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
- *
- * \param[in] index a \a valid index representing a \a valid query point in the dataset given
- * by \a setInputCloud. If indices were given in setInputCloud, index will be the position in
+ *
+ * \param[in] index a \a valid index representing a \a valid query point in the dataset given
+ * by \a setInputCloud. If indices were given in setInputCloud, index will be the position in
* the indices vector.
- *
+ *
* \param[in] k the number of neighbors to search for
* \param[out] k_indices the resultant indices of the neighboring points (must be resized to \a k a priori!)
- * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
+ * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
* a priori!)
* \return number of neighbors found
- *
+ *
* \exception asserts in debug mode if the index is not between 0 and the maximum number of points
*/
- virtual int
- nearestKSearch (int index, int k,
- std::vector<int> &k_indices, std::vector<float> &k_sqr_distances) const
+ virtual int
+ nearestKSearch (int index, unsigned int k,
+ Indices &k_indices, std::vector<float> &k_sqr_distances) const
{
if (indices_ == nullptr)
{
* returned.
* \return number of neighbors found in radius
*/
- virtual int
- radiusSearch (const PointT &p_q, double radius, std::vector<int> &k_indices,
+ virtual int
+ radiusSearch (const PointT &p_q, double radius, Indices &k_indices,
std::vector<float> &k_sqr_distances, unsigned int max_nn = 0) const = 0;
/** \brief Search for all the nearest neighbors of the query point in a given radius.
- *
+ *
* \attention This method does not do any bounds checking for the input index
* (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
- *
+ *
* \param[in] cloud the point cloud data
* \param[in] index a \a valid index in \a cloud representing a \a valid (i.e., finite) query point
* \param[in] radius the radius of the sphere bounding all of p_q's neighbors
* 0 or to a number higher than the number of points in the input cloud, all neighbors in \a radius will be
* returned.
* \return number of neighbors found in radius
- *
+ *
* \exception asserts in debug mode if the index is not between 0 and the maximum number of points
*/
- virtual int
- radiusSearch (const PointCloud &cloud, int index, double radius,
- std::vector<int> &k_indices, std::vector<float> &k_sqr_distances,
+ virtual int
+ radiusSearch (const PointCloud &cloud, int index, double radius,
+ Indices &k_indices, std::vector<float> &k_sqr_distances,
unsigned int max_nn = 0) const
{
assert (index >= 0 && index < static_cast<int> (cloud.size ()) && "Out-of-bounds error in radiusSearch!");
* returned.
* \return number of neighbors found in radius
*/
- template <typename PointTDiff> inline int
- radiusSearchT (const PointTDiff &point, double radius, std::vector<int> &k_indices,
+ template <typename PointTDiff> inline int
+ radiusSearchT (const PointTDiff &point, double radius, Indices &k_indices,
std::vector<float> &k_sqr_distances, unsigned int max_nn = 0) const
{
PointT p;
}
/** \brief Search for all the nearest neighbors of the query point in a given radius (zero-copy).
- *
+ *
* \attention This method does not do any bounds checking for the input index
* (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
- *
- * \param[in] index a \a valid index representing a \a valid query point in the dataset given
- * by \a setInputCloud. If indices were given in setInputCloud, index will be the position in
+ *
+ * \param[in] index a \a valid index representing a \a valid query point in the dataset given
+ * by \a setInputCloud. If indices were given in setInputCloud, index will be the position in
* the indices vector.
- *
+ *
* \param[in] radius the radius of the sphere bounding all of p_q's neighbors
* \param[out] k_indices the resultant indices of the neighboring points
* \param[out] k_sqr_distances the resultant squared distances to the neighboring points
* 0 or to a number higher than the number of points in the input cloud, all neighbors in \a radius will be
* returned.
* \return number of neighbors found in radius
- *
+ *
* \exception asserts in debug mode if the index is not between 0 and the maximum number of points
*/
- virtual int
- radiusSearch (int index, double radius, std::vector<int> &k_indices,
+ virtual int
+ radiusSearch (int index, double radius, Indices &k_indices,
std::vector<float> &k_sqr_distances, unsigned int max_nn = 0) const
{
if (indices_ == nullptr)
return (epsilon_);
}
- /** \brief Minimum allowed number of k nearest neighbors points that a viable result must contain.
- * \param[in] min_pts the minimum number of neighbors in a viable neighborhood
+ /** \brief Minimum allowed number of k nearest neighbors points that a viable result must contain.
+ * \param[in] min_pts the minimum number of neighbors in a viable neighborhood
*/
inline void
setMinPts (int min_pts)
PointRepresentationConstPtr point_representation_;
/** \brief Class getName method. */
- virtual std::string
+ virtual std::string
getName () const = 0;
};
}
namespace pcl
{
- // Forward declarations
- template <typename T> class PointRepresentation;
-
- /** \brief KdTreeFLANN is a generic type of 3D spatial locator using kD-tree structures. The class is making use of
- * the FLANN (Fast Library for Approximate Nearest Neighbor) project by Marius Muja and David Lowe.
- *
- * \author Radu B. Rusu, Marius Muja
- * \ingroup kdtree
- */
- template <typename PointT, typename Dist = ::flann::L2_Simple<float> >
- class KdTreeFLANN : public pcl::KdTree<PointT>
+namespace detail {
+// Helper struct to create a compatible Matrix and copy data back when needed
+// Replace using if constexpr in C++17
+template <typename IndexT>
+struct compat_with_flann : std::false_type {};
+
+template <>
+struct compat_with_flann<std::size_t> : std::true_type {};
+
+template <typename IndexT>
+using CompatWithFlann = std::enable_if_t<compat_with_flann<IndexT>::value, bool>;
+template <typename IndexT>
+using NotCompatWithFlann = std::enable_if_t<!compat_with_flann<IndexT>::value, bool>;
+} // namespace detail
+
+/**
+ * @brief Compatibility template function to allow use of various types of indices with
+ * FLANN
+ * @details Template is used for all params to not constrain any FLANN side capability
+ * @param[in,out] index An index searcher, of type ::flann::Index<Dist> or similar, where
+ * Dist is a template for computing distance between 2 points
+ * @param[in] query A ::flann::Matrix<float> or compatible matrix representation of the
+ * query point
+ * @param[out] indices Indices found in radius
+ * @param[out] dists Computed distance matrix
+ * @param[in] radius Threshold for consideration
+ * @param[in] params Any parameters to pass to the radius_search call
+ */
+template <class FlannIndex,
+ class Query,
+ class Indices,
+ class Distances,
+ class SearchParams>
+int
+radius_search(const FlannIndex& index,
+ const Query& query,
+ Indices& indices,
+ Distances& dists,
+ float radius,
+ const SearchParams& params);
+
+/**
+ * @brief Compatibility template function to allow use of various types of indices with
+ * FLANN
+ * @details Template is used for all params to not constrain any FLANN side capability
+ * @param[in,out] index An index searcher, of type ::flann::Index<Dist> or similar, where
+ * Dist is a template for computing distance between 2 points
+ * @param[in] query A ::flann::Matrix<float> or compatible matrix representation of the
+ * query point
+ * @param[out] indices Neighboring k indices found
+ * @param[out] dists Computed distance matrix
+ * @param[in] k The number of nearest neighbors to return
+ * @param[in] params Any parameters to pass to the knn_search call
+ */
+template <class FlannIndex,
+ class Query,
+ class Indices,
+ class Distances,
+ class SearchParams>
+int
+knn_search(const FlannIndex& index,
+ const Query& query,
+ Indices& indices,
+ Distances& dists,
+ unsigned int k,
+ const SearchParams& params);
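+// Minimal usage sketch (mirroring the call in KdTreeFLANN::nearestKSearch; flann_index_,
+// query, dim_ and param_k_ are assumed to be set up as in that implementation):
+//   pcl::Indices k_indices (k);
+//   std::vector<float> k_distances (k);
+//   ::flann::Matrix<float> k_distances_mat (&k_distances[0], 1, k);
+//   knn_search (*flann_index_, ::flann::Matrix<float> (&query[0], 1, dim_),
+//               k_indices, k_distances_mat, k, param_k_);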
+
+/** \brief KdTreeFLANN is a generic type of 3D spatial locator using kD-tree structures.
+ * The class is making use of the FLANN (Fast Library for Approximate Nearest Neighbor)
+ * project by Marius Muja and David Lowe.
+ *
+ * \author Radu B. Rusu, Marius Muja
+ * \ingroup kdtree
+ */
+template <typename PointT, typename Dist = ::flann::L2_Simple<float>>
+class KdTreeFLANN : public pcl::KdTree<PointT> {
+public:
+ using KdTree<PointT>::input_;
+ using KdTree<PointT>::indices_;
+ using KdTree<PointT>::epsilon_;
+ using KdTree<PointT>::sorted_;
+ using KdTree<PointT>::point_representation_;
+ using KdTree<PointT>::nearestKSearch;
+ using KdTree<PointT>::radiusSearch;
+
+ using PointCloud = typename KdTree<PointT>::PointCloud;
+ using PointCloudConstPtr = typename KdTree<PointT>::PointCloudConstPtr;
+
+ using IndicesPtr = shared_ptr<Indices>;
+ using IndicesConstPtr = shared_ptr<const Indices>;
+
+ using FLANNIndex = ::flann::Index<Dist>;
+
+ // Boost shared pointers
+ using Ptr = shared_ptr<KdTreeFLANN<PointT, Dist>>;
+ using ConstPtr = shared_ptr<const KdTreeFLANN<PointT, Dist>>;
+
+ /** \brief Default Constructor for KdTreeFLANN.
+ * \param[in] sorted set to true if the application that the tree will be used for
+ * requires sorted nearest neighbor indices (default). False otherwise.
+ *
+ * By setting sorted to false, the \ref radiusSearch operations will be faster.
+ */
+ KdTreeFLANN(bool sorted = true);
+
+ /** \brief Copy constructor
+ * \param[in] k the tree to copy into this
+ */
+ KdTreeFLANN(const KdTreeFLANN<PointT, Dist>& k);
+
+ /** \brief Copy operator
+ * \param[in] k the tree to copy into this
+ */
+ inline KdTreeFLANN<PointT, Dist>&
+ operator=(const KdTreeFLANN<PointT, Dist>& k)
{
- public:
- using KdTree<PointT>::input_;
- using KdTree<PointT>::indices_;
- using KdTree<PointT>::epsilon_;
- using KdTree<PointT>::sorted_;
- using KdTree<PointT>::point_representation_;
- using KdTree<PointT>::nearestKSearch;
- using KdTree<PointT>::radiusSearch;
-
- using PointCloud = typename KdTree<PointT>::PointCloud;
- using PointCloudConstPtr = typename KdTree<PointT>::PointCloudConstPtr;
-
- using IndicesPtr = shared_ptr<std::vector<int> >;
- using IndicesConstPtr = shared_ptr<const std::vector<int> >;
-
- using FLANNIndex = ::flann::Index<Dist>;
-
- // Boost shared pointers
- using Ptr = shared_ptr<KdTreeFLANN<PointT, Dist> >;
- using ConstPtr = shared_ptr<const KdTreeFLANN<PointT, Dist> >;
-
- /** \brief Default Constructor for KdTreeFLANN.
- * \param[in] sorted set to true if the application that the tree will be used for requires sorted nearest neighbor indices (default). False otherwise.
- *
- * By setting sorted to false, the \ref radiusSearch operations will be faster.
- */
- KdTreeFLANN (bool sorted = true);
-
- /** \brief Copy constructor
- * \param[in] k the tree to copy into this
- */
- KdTreeFLANN (const KdTreeFLANN<PointT, Dist> &k);
-
- /** \brief Copy operator
- * \param[in] k the tree to copy into this
- */
- inline KdTreeFLANN<PointT, Dist>&
- operator = (const KdTreeFLANN<PointT, Dist>& k)
- {
- KdTree<PointT>::operator=(k);
- flann_index_ = k.flann_index_;
- cloud_ = k.cloud_;
- index_mapping_ = k.index_mapping_;
- identity_mapping_ = k.identity_mapping_;
- dim_ = k.dim_;
- total_nr_points_ = k.total_nr_points_;
- param_k_ = k.param_k_;
- param_radius_ = k.param_radius_;
- return (*this);
- }
-
- /** \brief Set the search epsilon precision (error bound) for nearest neighbors searches.
- * \param[in] eps precision (error bound) for nearest neighbors searches
- */
- void
- setEpsilon (float eps) override;
-
- void
- setSortedResults (bool sorted);
-
- inline Ptr makeShared () { return Ptr (new KdTreeFLANN<PointT, Dist> (*this)); }
-
- /** \brief Destructor for KdTreeFLANN.
- * Deletes all allocated data arrays and destroys the kd-tree structures.
- */
- ~KdTreeFLANN ()
- {
- cleanup ();
- }
-
- /** \brief Provide a pointer to the input dataset.
- * \param[in] cloud the const boost shared pointer to a PointCloud message
- * \param[in] indices the point indices subset that is to be used from \a cloud - if NULL the whole cloud is used
- */
- void
- setInputCloud (const PointCloudConstPtr &cloud, const IndicesConstPtr &indices = IndicesConstPtr ()) override;
-
- /** \brief Search for k-nearest neighbors for the given query point.
- *
- * \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
- *
- * \param[in] point a given \a valid (i.e., finite) query point
- * \param[in] k the number of neighbors to search for
- * \param[out] k_indices the resultant indices of the neighboring points (must be resized to \a k a priori!)
- * \param[out] k_sqr_distances the resultant squared distances to the neighboring points (must be resized to \a k
- * a priori!)
- * \return number of neighbors found
- *
- * \exception asserts in debug mode if the index is not between 0 and the maximum number of points
- */
- int
- nearestKSearch (const PointT &point, int k,
- std::vector<int> &k_indices, std::vector<float> &k_sqr_distances) const override;
-
- /** \brief Search for all the nearest neighbors of the query point in a given radius.
- *
- * \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
- *
- * \param[in] point a given \a valid (i.e., finite) query point
- * \param[in] radius the radius of the sphere bounding all of p_q's neighbors
- * \param[out] k_indices the resultant indices of the neighboring points
- * \param[out] k_sqr_distances the resultant squared distances to the neighboring points
- * \param[in] max_nn if given, bounds the maximum returned neighbors to this value. If \a max_nn is set to
- * 0 or to a number higher than the number of points in the input cloud, all neighbors in \a radius will be
- * returned.
- * \return number of neighbors found in radius
- *
- * \exception asserts in debug mode if the index is not between 0 and the maximum number of points
- */
- int
- radiusSearch (const PointT &point, double radius, std::vector<int> &k_indices,
- std::vector<float> &k_sqr_distances, unsigned int max_nn = 0) const override;
-
- private:
- /** \brief Internal cleanup method. */
- void
- cleanup ();
-
- /** \brief Converts a PointCloud to the internal FLANN point array representation. Returns the number
- * of points.
- * \param cloud the PointCloud
- */
- void
- convertCloudToArray (const PointCloud &cloud);
-
- /** \brief Converts a PointCloud with a given set of indices to the internal FLANN point array
- * representation. Returns the number of points.
- * \param[in] cloud the PointCloud data
- * \param[in] indices the point cloud indices
- */
- void
- convertCloudToArray (const PointCloud &cloud, const std::vector<int> &indices);
-
- private:
- /** \brief Class getName method. */
- std::string
- getName () const override { return ("KdTreeFLANN"); }
-
- /** \brief A FLANN index object. */
- std::shared_ptr<FLANNIndex> flann_index_;
-
- /** \brief Internal pointer to data. TODO: replace with std::shared_ptr<float[]> with C++17*/
- std::shared_ptr<float> cloud_;
-
- /** \brief mapping between internal and external indices. */
- std::vector<int> index_mapping_;
-
- /** \brief whether the mapping between internal and external indices is identity */
- bool identity_mapping_;
-
- /** \brief Tree dimensionality (i.e. the number of dimensions per point). */
- int dim_;
-
- /** \brief The total size of the data (either equal to the number of points in the input cloud or to the number of indices - if passed). */
- int total_nr_points_;
-
- /** \brief The KdTree search parameters for K-nearest neighbors. */
- ::flann::SearchParams param_k_;
-
- /** \brief The KdTree search parameters for radius search. */
- ::flann::SearchParams param_radius_;
+ KdTree<PointT>::operator=(k);
+ flann_index_ = k.flann_index_;
+ cloud_ = k.cloud_;
+ index_mapping_ = k.index_mapping_;
+ identity_mapping_ = k.identity_mapping_;
+ dim_ = k.dim_;
+ total_nr_points_ = k.total_nr_points_;
+ param_k_ = k.param_k_;
+ param_radius_ = k.param_radius_;
+ return (*this);
+ }
+
+ /** \brief Set the search epsilon precision (error bound) for nearest neighbors
+ * searches.
+ * \param[in] eps precision (error bound) for nearest neighbors searches
+ */
+ void
+ setEpsilon(float eps) override;
+
+ void
+ setSortedResults(bool sorted);
+
+ inline Ptr
+ makeShared()
+ {
+ return Ptr(new KdTreeFLANN<PointT, Dist>(*this));
+ }
+
+ /** \brief Destructor for KdTreeFLANN.
+ * Deletes all allocated data arrays and destroys the kd-tree structures.
+ */
+ ~KdTreeFLANN() { cleanup(); }
+
+ /** \brief Provide a pointer to the input dataset.
+ * \param[in] cloud the const boost shared pointer to a PointCloud message
+ * \param[in] indices the point indices subset that is to be used from \a cloud - if
+ * NULL the whole cloud is used
+ */
+ void
+ setInputCloud(const PointCloudConstPtr& cloud,
+ const IndicesConstPtr& indices = IndicesConstPtr()) override;
+
+ /** \brief Search for k-nearest neighbors for the given query point.
+ *
+ * \attention This method does not do any bounds checking for the input index
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
+ *
+ * \param[in] point a given \a valid (i.e., finite) query point
+ * \param[in] k the number of neighbors to search for
+ * \param[out] k_indices the resultant indices of the neighboring points (must be
+ * resized to \a k a priori!)
+ * \param[out] k_sqr_distances the resultant squared distances to the neighboring
+ * points (must be resized to \a k a priori!)
+ * \return number of neighbors found
+ *
+ * \exception asserts in debug mode if the index is not between 0 and the maximum
+ * number of points
+ */
+ int
+ nearestKSearch(const PointT& point,
+ unsigned int k,
+ Indices& k_indices,
+ std::vector<float>& k_sqr_distances) const override;
+
+ /** \brief Search for all the nearest neighbors of the query point in a given radius.
+ *
+ * \attention This method does not do any bounds checking for the input index
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
+ *
+ * \param[in] point a given \a valid (i.e., finite) query point
+ * \param[in] radius the radius of the sphere bounding all of p_q's neighbors
+ * \param[out] k_indices the resultant indices of the neighboring points
+ * \param[out] k_sqr_distances the resultant squared distances to the neighboring
+ * points
+ * \param[in] max_nn if given, bounds the maximum returned neighbors to this value.
+ * If \a max_nn is set to 0 or to a number higher than the number of points in the
+ * input cloud, all neighbors in \a radius will be returned.
+ * \return number of neighbors found in radius
+ *
+ * \exception asserts in debug mode if the index is not between 0 and the maximum
+ * number of points
+ */
+ int
+ radiusSearch(const PointT& point,
+ double radius,
+ Indices& k_indices,
+ std::vector<float>& k_sqr_distances,
+ unsigned int max_nn = 0) const override;
+
+private:
+ /** \brief Internal cleanup method. */
+ void
+ cleanup();
+
+ /** \brief Converts a PointCloud to the internal FLANN point array representation.
+ * Returns the number of points.
+ * \param cloud the PointCloud
+ */
+ void
+ convertCloudToArray(const PointCloud& cloud);
+
+ /** \brief Converts a PointCloud with a given set of indices to the internal FLANN
+ * point array representation. Returns the number of points.
+ * \param[in] cloud the PointCloud data
+ * \param[in] indices the point cloud indices
+ */
+ void
+ convertCloudToArray(const PointCloud& cloud, const Indices& indices);
+
+private:
+ /** \brief Class getName method. */
+ std::string
+ getName() const override
+ {
+ return ("KdTreeFLANN");
+ }
+
+ /** \brief A FLANN index object. */
+ std::shared_ptr<FLANNIndex> flann_index_;
+
+ /** \brief Internal pointer to data. TODO: replace with std::shared_ptr<float[]> with
+ * C++17 */
+ std::shared_ptr<float> cloud_;
+
+ /** \brief mapping between internal and external indices. */
+ std::vector<int> index_mapping_;
+
+ /** \brief whether the mapping between internal and external indices is identity */
+ bool identity_mapping_;
+
+ /** \brief Tree dimensionality (i.e. the number of dimensions per point). */
+ int dim_;
+
+ /** \brief The total size of the data (either equal to the number of points in the
+ * input cloud or to the number of indices - if passed). */
+ uindex_t total_nr_points_;
+
+ /** \brief The KdTree search parameters for K-nearest neighbors. */
+ ::flann::SearchParams param_k_;
+
+ /** \brief The KdTree search parameters for radius search. */
+ ::flann::SearchParams param_radius_;
};
}
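As a quick orientation for the refactored declaration above, here is a minimal usage sketch (not part of the patch) showing the new pcl::Indices-based signatures of nearestKSearch and radiusSearch. It assumes an already populated pcl::PointCloud<pcl::PointXYZ>; the function name searchExample, the choice of query point, and the 5 cm radius are illustrative only.

#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

// Minimal sketch: build a kd-tree over `cloud` (assumed non-empty) and run the
// two search methods declared above.
void
searchExample (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud)
{
  pcl::KdTreeFLANN<pcl::PointXYZ> tree (/*sorted=*/ true);
  tree.setInputCloud (cloud);

  const pcl::PointXYZ& query = (*cloud)[0];

  // k-nearest-neighbor search; note the pcl::Indices output type
  const unsigned int k = 10;
  pcl::Indices k_indices (k);
  std::vector<float> k_sqr_distances (k);
  tree.nearestKSearch (query, k, k_indices, k_sqr_distances);

  // radius search within an illustrative 5 cm radius
  pcl::Indices r_indices;
  std::vector<float> r_sqr_distances;
  tree.radiusSearch (query, 0.05, r_indices, r_sqr_distances);
}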
\section secKDtreePresentation Overview
The <b>pcl_kdtree</b> library provides the kd-tree data-structure, using
- <a href="http://www.cs.ubc.ca/research/flann/</a>,
+ <a href="http://www.cs.ubc.ca/research/flann/">FLANN</a>,
that allows for fast <a href="http://en.wikipedia.org/wiki/Nearest_neighbor_search">nearest neighbor searches</a>.
A <a href="http://en.wikipedia.org/wiki/Kd-tree">Kd-tree</a> (<i>k</i>-dimensional tree) is a space-partitioning data
#include <pcl/point_types.h>
#include <pcl/keypoints/keypoint.h>
#include <pcl/common/intensity.h>
+#include <pcl/common/io.h> // for copyPointCloud
namespace pcl
{
void responseCurvature (PointCloudOut &output) const;
void refineCorners (PointCloudOut &corners) const;
/** \brief calculates the upper triangular part of the unnormalized covariance matrix over the normals given by the indices. */
- void calculateNormalCovar (const std::vector<int>& neighbors, float* coefficients) const;
+ void calculateNormalCovar (const pcl::Indices& neighbors, float* coefficients) const;
private:
float threshold_;
bool refine_;
void detectKeypoints (PointCloudOut &output);
void responseTomasi (PointCloudOut &output) const;
void refineCorners (PointCloudOut &corners) const;
- void calculateCombinedCovar (const std::vector<int>& neighbors, float* coefficients) const;
+ void calculateCombinedCovar (const pcl::Indices& neighbors, float* coefficients) const;
private:
float threshold_;
bool refine_;
#include <pcl/keypoints/harris_3d.h>
#include <pcl/common/io.h>
-#include <pcl/filters/passthrough.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/features/normal_3d.h>
#include <pcl/features/integral_image_normal.h>
-#include <pcl/common/time.h>
#include <pcl/common/centroid.h>
#ifdef __SSE__
#include <xmmintrin.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT, typename NormalT> void
-pcl::HarrisKeypoint3D<PointInT, PointOutT, NormalT>::calculateNormalCovar (const std::vector<int>& neighbors, float* coefficients) const
+pcl::HarrisKeypoint3D<PointInT, PointOutT, NormalT>::calculateNormalCovar (const pcl::Indices& neighbors, float* coefficients) const
{
unsigned count = 0;
// indices 0 1 2 3 4 5 6 7
float zz = 0;
- for (const int &neighbor : neighbors)
+ for (const auto &neighbor : neighbors)
{
if (std::isfinite ((*normals_)[neighbor].normal_x))
{
memset (coefficients, 0, sizeof (float) * 8);
#else
memset (coefficients, 0, sizeof (float) * 8);
- for (std::vector<int>::const_iterator iIt = neighbors.begin(); iIt != neighbors.end(); ++iIt)
+ for (const auto& index : neighbors)
{
- if (std::isfinite ((*normals_)[*iIt].normal_x))
+ if (std::isfinite ((*normals_)[index].normal_x))
{
- coefficients[0] += (*normals_)[*iIt].normal_x * (*normals_)[*iIt].normal_x;
- coefficients[1] += (*normals_)[*iIt].normal_x * (*normals_)[*iIt].normal_y;
- coefficients[2] += (*normals_)[*iIt].normal_x * (*normals_)[*iIt].normal_z;
+ coefficients[0] += (*normals_)[index].normal_x * (*normals_)[index].normal_x;
+ coefficients[1] += (*normals_)[index].normal_x * (*normals_)[index].normal_y;
+ coefficients[2] += (*normals_)[index].normal_x * (*normals_)[index].normal_z;
- coefficients[5] += (*normals_)[*iIt].normal_y * (*normals_)[*iIt].normal_y;
- coefficients[6] += (*normals_)[*iIt].normal_y * (*normals_)[*iIt].normal_z;
- coefficients[7] += (*normals_)[*iIt].normal_z * (*normals_)[*iIt].normal_z;
+ coefficients[5] += (*normals_)[index].normal_y * (*normals_)[index].normal_y;
+ coefficients[6] += (*normals_)[index].normal_y * (*normals_)[index].normal_z;
+ coefficients[7] += (*normals_)[index].normal_z * (*normals_)[index].normal_z;
++count;
}
}
else
{
- output.points.clear ();
- output.points.reserve (response->size());
+ output.clear ();
+ output.reserve (response->size());
#pragma omp parallel for \
default(none) \
(*response)[idx].intensity < threshold_)
continue;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (idx, search_radius_, nn_indices, nn_dists);
bool is_maxima = true;
- for (std::vector<int>::const_iterator iIt = nn_indices.begin(); iIt != nn_indices.end(); ++iIt)
+ for (const auto& index : nn_indices)
{
- if ((*response)[idx].intensity < (*response)[*iIt].intensity)
+ if ((*response)[idx].intensity < (*response)[index].intensity)
{
is_maxima = false;
break;
if (is_maxima)
#pragma omp critical
{
- output.points.push_back ((*response)[idx]);
+ output.push_back ((*response)[idx]);
keypoints_indices_->indices.push_back (idx);
}
}
output [pIdx].intensity = 0.0; //std::numeric_limits<float>::quiet_NaN ();
if (isFinite (pointIn))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (pointIn, search_radius_, nn_indices, nn_dists);
calculateNormalCovar (nn_indices, covar);
output [pIdx].intensity = 0.0;
if (isFinite (pointIn))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (pointIn, search_radius_, nn_indices, nn_dists);
calculateNormalCovar (nn_indices, covar);
output [pIdx].intensity = 0.0;
if (isFinite (pointIn))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (pointIn, search_radius_, nn_indices, nn_dists);
calculateNormalCovar (nn_indices, covar);
point.y = (*input_)[idx].y;
point.z = (*input_)[idx].z;
point.intensity = (*normals_)[idx].curvature;
- output.points.push_back(point);
+ output.push_back(point);
}
// does not change the order
output.height = input_->height;
output [pIdx].intensity = 0.0;
if (isFinite (pointIn))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (pointIn, search_radius_, nn_indices, nn_dists);
calculateNormalCovar (nn_indices, covar);
Eigen::Matrix3f NNT;
Eigen::Matrix3f NNTInv;
Eigen::Vector3f NNTp;
- float diff;
const unsigned max_iterations = 10;
#pragma omp parallel for \
default(none) \
shared(corners) \
- firstprivate(nnT, NNT, NNTInv, NNTp, diff) \
+ firstprivate(nnT, NNT, NNTInv, NNTp) \
num_threads(threads_)
for (int cIdx = 0; cIdx < static_cast<int> (corners.size ()); ++cIdx)
{
corner.x = corners[cIdx].x;
corner.y = corners[cIdx].y;
corner.z = corners[cIdx].z;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (corner, search_radius_, nn_indices, nn_dists);
- for (std::vector<int>::const_iterator iIt = nn_indices.begin(); iIt != nn_indices.end(); ++iIt)
+ for (const auto& index : nn_indices)
{
- if (!std::isfinite ((*normals_)[*iIt].normal_x))
+ if (!std::isfinite ((*normals_)[index].normal_x))
continue;
- nnT = (*normals_)[*iIt].getNormalVector3fMap () * (*normals_)[*iIt].getNormalVector3fMap ().transpose();
+ nnT = (*normals_)[index].getNormalVector3fMap () * (*normals_)[index].getNormalVector3fMap ().transpose();
NNT += nnT;
- NNTp += nnT * (*surface_)[*iIt].getVector3fMap ();
+ NNTp += nnT * (*surface_)[index].getVector3fMap ();
}
if (invert3x3SymMatrix (NNT, NNTInv) != 0)
corners[cIdx].getVector3fMap () = NNTInv * NNTp;
- diff = (corners[cIdx].getVector3fMap () - corner.getVector3fMap()).squaredNorm ();
- } while (diff > 1e-6 && ++iterations < max_iterations);
+ const auto diff = (corners[cIdx].getVector3fMap () - corner.getVector3fMap()).squaredNorm ();
+ if (diff <= 1e-6) {
+ break;
+ }
+ } while (++iterations < max_iterations);
}
}
#define PCL_INSTANTIATE_HarrisKeypoint3D(T,U,N) template class PCL_EXPORTS pcl::HarrisKeypoint3D<T,U,N>;
#endif // #ifndef PCL_HARRIS_KEYPOINT_3D_IMPL_H_
-
#ifndef PCL_HARRIS_KEYPOINT_6D_IMPL_H_
#define PCL_HARRIS_KEYPOINT_6D_IMPL_H_
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
#include <pcl/keypoints/harris_6d.h>
#include <pcl/common/io.h>
#include <pcl/features/normal_3d.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT, typename NormalT> void
-pcl::HarrisKeypoint6D<PointInT, PointOutT, NormalT>::calculateCombinedCovar (const std::vector<int>& neighbors, float* coefficients) const
+pcl::HarrisKeypoint6D<PointInT, PointOutT, NormalT>::calculateCombinedCovar (const pcl::Indices& neighbors, float* coefficients) const
{
memset (coefficients, 0, sizeof (float) * 21);
unsigned count = 0;
- for (const int &neighbor : neighbors)
+ for (const auto &neighbor : neighbors)
{
if (std::isfinite ((*normals_)[neighbor].normal_x) && std::isfinite ((*intensity_gradients_)[neighbor].gradient [0]))
{
}
else
{
- output.points.clear ();
- output.points.reserve (response->size());
+ output.clear ();
+ output.reserve (response->size());
#pragma omp parallel for \
default(none) \
if (!isFinite ((*response)[idx]) || (*response)[idx].intensity < threshold_)
continue;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (idx, search_radius_, nn_indices, nn_dists);
bool is_maxima = true;
- for (std::vector<int>::const_iterator iIt = nn_indices.begin(); iIt != nn_indices.end(); ++iIt)
+ for (const auto& index : nn_indices)
{
- if ((*response)[idx].intensity < (*response)[*iIt].intensity)
+ if ((*response)[idx].intensity < (*response)[index].intensity)
{
is_maxima = false;
break;
if (is_maxima)
#pragma omp critical
{
- output.points.push_back ((*response)[idx]);
+ output.push_back ((*response)[idx]);
keypoints_indices_->indices.push_back (idx);
}
}
pointOut.intensity = 0.0; //std::numeric_limits<float>::quiet_NaN ();
if (isFinite (pointIn))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (pointIn, search_radius_, nn_indices, nn_dists);
calculateCombinedCovar (nn_indices, covar);
pointOut.z = pointIn.z;
#pragma omp critical
- output.points.push_back(pointOut);
+ output.push_back(pointOut);
}
output.height = input_->height;
output.width = input_->width;
corner.x = cornerIt->x;
corner.y = cornerIt->y;
corner.z = cornerIt->z;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
search.radiusSearch (corner, search_radius_, nn_indices, nn_dists);
- for (std::vector<int>::const_iterator iIt = nn_indices.begin(); iIt != nn_indices.end(); ++iIt)
+ for (const auto& index : nn_indices)
{
- normal = reinterpret_cast<const Eigen::Vector3f*> (&((*normals_)[*iIt].normal_x));
- point = reinterpret_cast<const Eigen::Vector3f*> (&((*surface_)[*iIt].x));
+ normal = reinterpret_cast<const Eigen::Vector3f*> (&((*normals_)[index].normal_x));
+ point = reinterpret_cast<const Eigen::Vector3f*> (&((*surface_)[index].x));
nnT = (*normal) * (normal->transpose());
NNT += nnT;
NNTp += nnT * (*point);
#ifndef PCL_ISS_KEYPOINT3D_IMPL_H_
#define PCL_ISS_KEYPOINT3D_IMPL_H_
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
#include <pcl/features/boundary.h>
#include <pcl/features/normal_3d.h>
#include <pcl/features/integral_image_normal.h>
if (pcl::isFinite(current_point))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
int n_neighbors;
- this->searchForNeighbors (static_cast<int> (index), border_radius, nn_indices, nn_distances);
+ this->searchForNeighbors (index, border_radius, nn_indices, nn_distances);
n_neighbors = static_cast<int> (nn_indices.size ());
cov_m = Eigen::Matrix3d::Zero ();
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
int n_neighbors;
double cov[9];
memset(cov, 0, sizeof(double) * 9);
- for (int n_idx = 0; n_idx < n_neighbors; n_idx++)
+ for (const auto& n_idx : nn_indices)
{
- const PointInT& n_point = (*input_)[nn_indices[n_idx]];
+ const PointInT& n_point = (*input_)[n_idx];
double neigh_point[3];
memset(neigh_point, 0, sizeof(double) * 3);
pcl::ISSKeypoint3D<PointInT, PointOutT, NormalT>::detectKeypoints (PointCloudOut &output)
{
// Make sure the output cloud is empty
- output.points.clear ();
+ output.clear ();
if (border_radius_ > 0.0)
edge_points_ = getBoundaryPoints (*(input_->makeShared ()), border_radius_, angle_threshold_);
if ((border_radius_ > 0.0) && (pcl::isFinite(current_point)))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
- this->searchForNeighbors (static_cast<int> (index), border_radius_, nn_indices, nn_distances);
+ this->searchForNeighbors (index, border_radius_, nn_indices, nn_distances);
- for (const int &nn_index : nn_indices)
+ for (const auto &nn_index : nn_indices)
{
if (edge_points_[nn_index])
{
if ((third_eigen_value_[index] > 0.0) && (pcl::isFinite(current_point)))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
int n_neighbors;
- this->searchForNeighbors (static_cast<int> (index), non_max_radius_, nn_indices, nn_distances);
+ this->searchForNeighbors (index, non_max_radius_, nn_indices, nn_distances);
n_neighbors = static_cast<int> (nn_indices.size ());
{
bool is_max = true;
- for (int j = 0 ; j < n_neighbors; j++)
- if (third_eigen_value_[index] < third_eigen_value_[nn_indices[j]])
+ for (const auto& j : nn_indices)
+ if (third_eigen_value_[index] < third_eigen_value_[j])
is_max = false;
if (is_max)
feat_max[index] = true;
{
PointOutT p;
p.getVector3fMap () = (*input_)[index].getVector3fMap ();
- output.points.push_back(p);
+ output.push_back(p);
keypoints_indices_->indices.push_back (index);
}
}
#ifndef PCL_KEYPOINT_IMPL_H_
#define PCL_KEYPOINT_IMPL_H_
+#include <pcl/console/print.h> // for PCL_ERROR
+
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
namespace pcl
{
if (surface_ == input_) // if the two surfaces are the same
{
// Declare the search locator definition
- search_method_ = [this] (int index, double radius, std::vector<int> &k_indices, std::vector<float> &k_distances)
+ search_method_ = [this] (pcl::index_t index, double radius, pcl::Indices &k_indices, std::vector<float> &k_distances)
{
return tree_->radiusSearch (index, radius, k_indices, k_distances, 0);
};
else
{
// Declare the search locator definition
- search_method_surface_ = [this] (const PointCloudIn &cloud, int index, double radius, std::vector<int> &k_indices, std::vector<float> &k_distances)
+ search_method_surface_ = [this] (const PointCloudIn &cloud, pcl::index_t index, double radius, pcl::Indices &k_indices, std::vector<float> &k_distances)
{
return tree_->radiusSearch (cloud, index, radius, k_indices, k_distances, 0);
};
if (surface_ == input_) // if the two surfaces are the same
{
// Declare the search locator definition
- search_method_ = [this] (int index, int k, std::vector<int> &k_indices, std::vector<float> &k_distances)
+ search_method_ = [this] (pcl::index_t index, int k, pcl::Indices &k_indices, std::vector<float> &k_distances)
{
return tree_->nearestKSearch (index, k, k_indices, k_distances);
};
else
{
// Declare the search locator definition
- search_method_surface_ = [this] (const PointCloudIn &cloud, int index, int k, std::vector<int> &k_indices, std::vector<float> &k_distances)
+ search_method_surface_ = [this] (const PointCloudIn &cloud, pcl::index_t index, int k, pcl::Indices &k_indices, std::vector<float> &k_distances)
{
return tree_->nearestKSearch (cloud, index, k, k_indices, k_distances);
};
scale_idx_ = pcl::getFieldIndex<PointOutT> ("scale", out_fields_);
// Make sure the output cloud is empty
- output.points.clear ();
+ output.clear ();
// Create a local copy of the input cloud that will be resized for each octave
typename pcl::PointCloud<PointInT>::Ptr cloud (new pcl::PointCloud<PointInT> (*input_));
computeScaleSpace (input, tree, scales, diff_of_gauss);
// Find extrema in the DoG scale space
- std::vector<int> extrema_indices, extrema_scales;
+ pcl::Indices extrema_indices;
+ std::vector<int> extrema_scales;
findScaleSpaceExtrema (input, tree, diff_of_gauss, extrema_indices, extrema_scales);
- output.points.reserve (output.size () + extrema_indices.size ());
+ output.reserve (output.size () + extrema_indices.size ());
// Save scale?
if (scale_idx_ != -1)
{
for (std::size_t i_keypoint = 0; i_keypoint < extrema_indices.size (); ++i_keypoint)
{
PointOutT keypoint;
- const int &keypoint_index = extrema_indices[i_keypoint];
+ const auto &keypoint_index = extrema_indices[i_keypoint];
keypoint.x = input[keypoint_index].x;
keypoint.y = input[keypoint_index].y;
keypoint.z = input[keypoint_index].z;
memcpy (reinterpret_cast<char*> (&keypoint) + out_fields_[scale_idx_].offset,
&scales[extrema_scales[i_keypoint]], sizeof (float));
- output.points.push_back (keypoint);
+ output.push_back (keypoint);
}
}
else
{
// Add keypoints to output
- for (const int &keypoint_index : extrema_indices)
+ for (const auto &keypoint_index : extrema_indices)
{
PointOutT keypoint;
keypoint.x = input[keypoint_index].x;
keypoint.y = input[keypoint_index].y;
keypoint.z = input[keypoint_index].z;
- output.points.push_back (keypoint);
+ output.push_back (keypoint);
}
}
}
for (int i_point = 0; i_point < static_cast<int> (input.size ()); ++i_point)
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dist;
tree.radiusSearch (i_point, max_radius, nn_indices, nn_dist); // *
// * note: at this stage of the algorithm, we must find all points within a radius defined by the maximum scale,
template <typename PointInT, typename PointOutT> void
pcl::SIFTKeypoint<PointInT, PointOutT>::findScaleSpaceExtrema (
const PointCloudIn &input, KdTree &tree, const Eigen::MatrixXf &diff_of_gauss,
- std::vector<int> &extrema_indices, std::vector<int> &extrema_scales)
+ pcl::Indices &extrema_indices, std::vector<int> &extrema_scales)
{
const int k = 25;
- std::vector<int> nn_indices (k);
+ pcl::Indices nn_indices (k);
std::vector<float> nn_dist (k);
const int nr_scales = static_cast<int> (diff_of_gauss.cols ());
// Find minima and maxima in differences inside the input cloud
typename pcl::search::Search<PointT>::Ptr input_tree = cloud_trees_.back ();
- for (int point_i = 0; point_i < static_cast<int> (input_->size ()); ++point_i)
+ for (pcl::index_t point_i = 0; point_i < static_cast<pcl::index_t> (input_->size ()); ++point_i)
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
input_tree->radiusSearch (point_i, input_scale_ * neighborhood_constant_, nn_indices, nn_distances);
bool is_min = true, is_max = true;
- for (const int &nn_index : nn_indices)
+ for (const auto &nn_index : nn_indices)
if (nn_index != point_i)
{
if (diffs[input_index_][point_i] < diffs[input_index_][nn_index])
cloud_trees_[cloud_i]->radiusSearch (point_i, scales_[scale_i].first * neighborhood_constant_, nn_indices, nn_distances);
bool is_min_other_scale = true, is_max_other_scale = true;
- for (const int &nn_index : nn_indices)
+ for (const auto &nn_index : nn_indices)
if (nn_index != point_i)
{
if (diffs[input_index_][point_i] < diffs[cloud_i][nn_index])
// check if point was minimum/maximum over all the scales
if (passed_min || passed_max)
{
- output.points.push_back ((*input_)[point_i]);
+ output.push_back ((*input_)[point_i]);
keypoints_indices_->indices.push_back (point_i);
}
}
#ifndef PCL_SUSAN_IMPL_HPP_
#define PCL_SUSAN_IMPL_HPP_
+#include <pcl/common/io.h> // for getFieldIndex
#include <pcl/keypoints/susan.h>
#include <pcl/features/normal_3d.h>
#include <pcl/features/integral_image_normal.h>
// template <typename PointInT, typename PointOutT, typename NormalT, typename IntensityT> void
// pcl::SUSANKeypoint<PointInT, PointOutT, NormalT, IntensityT>::USAN (const PointInT& nucleus,
// const NormalT& nucleus_normal,
-// const std::vector<int>& neighbors,
+// const pcl::Indices& neighbors,
// const float& t,
// float& response,
// Eigen::Vector3f& centroid) const
// //xx xy xz yy yz zz
// std::vector<float> coefficients(6);
// memset (&coefficients[0], 0, sizeof (float) * 6);
-// for (std::vector<int>::const_iterator index = neighbors.begin(); index != neighbors.end(); ++index)
+// for (const auto& index : neighbors)
// {
-// if (std::isfinite ((*normals_)[*index].normal_x))
+// if (std::isfinite ((*normals_)[index].normal_x))
// {
-// Eigen::Vector3f diff = (*normals_)[*index].getNormal3fMap () - nucleus_normal.getNormal3fMap ();
+// Eigen::Vector3f diff = (*normals_)[index].getNormal3fMap () - nucleus_normal.getNormal3fMap ();
// float c = diff.norm () / t;
// c = -1 * pow (c, 6.0);
// c = std::exp (c);
-// Eigen::Vector3f xyz = (*surface_)[*index].getVector3fMap ();
+// Eigen::Vector3f xyz = (*surface_)[index].getVector3fMap ();
// centroid += c * xyz;
// area += c;
// coefficients[0] += c * (x0 - xyz.x) * (x0 - xyz.x);
// Check if the output has a "label" field
label_idx_ = pcl::getFieldIndex<PointOutT> ("label", out_fields_);
- const int input_size = static_cast<int> (input_->size ());
- for (int point_index = 0; point_index < input_size; ++point_index)
+ const auto input_size = static_cast<pcl::index_t> (input_->size ());
+ for (pcl::index_t point_index = 0; point_index < input_size; ++point_index)
{
const PointInT& point_in = input_->points [point_index];
const NormalT& normal_in = normals_->points [point_index];
Eigen::Vector3f nucleus = point_in.getVector3fMap ();
Eigen::Vector3f nucleus_normal = normals_->points [point_index].getNormalVector3fMap ();
float nucleus_intensity = intensity_ (point_in);
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (point_in, search_radius_, nn_indices, nn_dists);
float area = 0;
Eigen::Vector3f centroid = Eigen::Vector3f::Zero ();
// Exclude nucleus from the usan
std::vector<int> usan; usan.reserve (nn_indices.size () - 1);
- for (std::vector<int>::const_iterator index = nn_indices.begin(); index != nn_indices.end(); ++index)
+ for (const auto& index : nn_indices)
{
- if ((*index != point_index) && std::isfinite ((*normals_)[*index].normal_x))
+ if ((index != point_index) && std::isfinite ((*normals_)[index].normal_x))
{
// if the point fulfill condition
- if ((std::abs (nucleus_intensity - intensity_ ((*input_)[*index])) <= intensity_threshold_) ||
- (1 - nucleus_normal.dot ((*normals_)[*index].getNormalVector3fMap ()) <= angular_threshold_))
+ if ((std::abs (nucleus_intensity - intensity_ ((*input_)[index])) <= intensity_threshold_) ||
+ (1 - nucleus_normal.dot ((*normals_)[index].getNormalVector3fMap ()) <= angular_threshold_))
{
++area;
- centroid += (*input_)[*index].getVector3fMap ();
- usan.push_back (*index);
+ centroid += (*input_)[index].getVector3fMap ();
+ usan.push_back (index);
}
}
}
}
else
{
- output.points.clear ();
- output.points.reserve (response->size());
+ output.clear ();
+ output.reserve (response->size());
- for (int idx = 0; idx < static_cast<int> (response->size ()); ++idx)
+ for (pcl::index_t idx = 0; idx < static_cast<pcl::index_t> (response->size ()); ++idx)
{
const PointOutT& point_in = response->points [idx];
const NormalT& normal_in = normals_->points [idx];
const float intensity = intensity_out_ ((*response)[idx]);
if (!isFinite (point_in) || !isFinite (normal_in) || (intensity == 0))
continue;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->radiusSearch (idx, search_radius_, nn_indices, nn_dists);
bool is_minima = true;
- for (std::vector<int>::const_iterator nn_it = nn_indices.begin(); nn_it != nn_indices.end(); ++nn_it)
+ for (const auto& nn_index : nn_indices)
{
-// if (intensity > (*response)[*nn_it].intensity)
- if (intensity > intensity_out_ ((*response)[*nn_it]))
+// if (intensity > (*response)[nn_index].intensity)
+ if (intensity > intensity_out_ ((*response)[nn_index]))
{
is_minima = false;
break;
}
if (is_minima)
{
- output.points.push_back ((*response)[idx]);
+ output.push_back ((*response)[idx]);
keypoints_indices_->indices.push_back (idx);
}
}
}
// Non maximas suppression
- std::vector<int> indices = *indices_;
+ pcl::Indices indices = *indices_;
std::sort (indices.begin (), indices.end (), [this] (int p1, int p2) { return greaterCornernessAtIndices (p1, p2); });
output.clear ();
}
}
// Non maximas suppression
- std::vector<int> indices = *indices_;
+ pcl::Indices indices = *indices_;
std::sort (indices.begin (), indices.end (), [this] (int p1, int p2) { return greaterCornernessAtIndices (p1, p2); });
output.clear ();
#pragma once
#include <pcl/keypoints/keypoint.h>
+#include <pcl/octree/octree_search.h> // for OctreePointCloudSearch
namespace pcl
{
// PCL includes
#include <pcl/pcl_base.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search
#include <pcl/pcl_config.h>
#include <functional>
using PointCloudInPtr = typename PointCloudIn::Ptr;
using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
using PointCloudOut = pcl::PointCloud<PointOutT>;
- using SearchMethod = std::function<int (int, double, std::vector<int> &, std::vector<float> &)>;
- using SearchMethodSurface = std::function<int (const PointCloudIn &cloud, int index, double, std::vector<int> &, std::vector<float> &)>;
+ using SearchMethod = std::function<int (pcl::index_t, double, pcl::Indices &, std::vector<float> &)>;
+ using SearchMethodSurface = std::function<int (const PointCloudIn &cloud, pcl::index_t index, double, pcl::Indices &, std::vector<float> &)>;
public:
/** \brief Empty constructor. */
* k-nearest neighbors
*/
inline int
- searchForNeighbors (int index, double parameter, std::vector<int> &indices, std::vector<float> &distances) const
+ searchForNeighbors (pcl::index_t index, double parameter, pcl::Indices &indices, std::vector<float> &distances) const
{
if (surface_ == input_) // if the two surfaces are the same
return (search_method_ (index, parameter, indices, distances));
void
findScaleSpaceExtrema (const PointCloudIn &input, KdTree &tree,
const Eigen::MatrixXf &diff_of_gauss,
- std::vector<int> &extrema_indices, std::vector<int> &extrema_scales);
+ pcl::Indices &extrema_indices, std::vector<int> &extrema_scales);
/** \brief The standard deviation of the smallest scale in the scale space.*/
#pragma once
-#ifdef __DEPRECATED
-PCL_DEPRECATED_HEADER(1, 12, "UniformSampling is not a Keypoint anymore, use <pcl/filters/uniform_sampling.h> instead.")
-#endif
+PCL_DEPRECATED_HEADER(1, 15, "UniformSampling is not a Keypoint anymore, use <pcl/filters/uniform_sampling.h> instead.")
#include <pcl/filters/uniform_sampling.h>
}
#else
pcl::utils::ignore(srcimg, srcwidth, srcheight, dstimg, dstwidth);
- PCL_ERROR("brisk without SSSE3 support not implemented");
+ PCL_ERROR("brisk without SSSE3 support not implemented\n");
#endif
}
}
#else
pcl::utils::ignore(srcimg, srcwidth, srcheight, dstimg, dstwidth);
- PCL_ERROR("brisk without SSSE3 support not implemented");
+ PCL_ERROR("brisk without SSSE3 support not implemented\n");
#endif
}
void
NarfKeypoint::detectKeypoints (NarfKeypoint::PointCloudOut& output)
{
- output.points.clear ();
+ output.clear ();
if (indices_)
{
{
if (!is_interest_point_image_[index])
continue;
- output.points.push_back (index);
+ output.push_back (index);
}
}
const std::size_t num_of_examples = examples.size();
if (num_of_examples == 0) {
PCL_ERROR(
- "Reached invalid point in decision tree training: Number of examples is 0!");
+ "Reached invalid point in decision tree training: Number of examples is 0!\n");
return;
};
for (Centroids::value_type& centroid : centroids_) {
PointId num_points_in_cluster = 0;
+ // Reset the centroid to zero before accumulating this cluster's points
+ for (Point::value_type& c : centroid)
+ c = 0.0;
+
// For each PointId in this set
for (const auto& pid : clusters_to_points_[cid]) {
Point p = data_[pid];
#ifdef __GNUC__
#pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
// Marking all Boost headers as system headers to remove warnings
#include <boost/graph/adjacency_list.hpp>
template <typename LeafContainerT, typename BranchContainerT>
void
Octree2BufBase<LeafContainerT, BranchContainerT>::setMaxVoxelIndex(
- unsigned int max_voxel_index_arg)
+ uindex_t max_voxel_index_arg)
{
- unsigned int treeDepth;
+ uindex_t treeDepth;
assert(max_voxel_index_arg > 0);
// tree depth == amount of bits of maxVoxels
- treeDepth = std::max(
- (std::min(static_cast<unsigned int>(OctreeKey::maxDepth),
- static_cast<unsigned int>(std::ceil(std::log2(max_voxel_index_arg))))),
- static_cast<unsigned int>(0));
+ treeDepth =
+ std::max<uindex_t>(std::min<uindex_t>(OctreeKey::maxDepth,
+ std::ceil(std::log2(max_voxel_index_arg))),
+ 0);
// define depthMask_ by setting a single bit to 1 at bit position == tree depth
depth_mask_ = (1 << (treeDepth - 1));
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
void
-Octree2BufBase<LeafContainerT, BranchContainerT>::setTreeDepth(unsigned int depth_arg)
+Octree2BufBase<LeafContainerT, BranchContainerT>::setTreeDepth(uindex_t depth_arg)
{
assert(depth_arg > 0);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
LeafContainerT*
-Octree2BufBase<LeafContainerT, BranchContainerT>::findLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg)
+Octree2BufBase<LeafContainerT, BranchContainerT>::findLeaf(uindex_t idx_x_arg,
+ uindex_t idx_y_arg,
+ uindex_t idx_z_arg)
{
// generate key
OctreeKey key(idx_x_arg, idx_y_arg, idx_z_arg);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
LeafContainerT*
-Octree2BufBase<LeafContainerT, BranchContainerT>::createLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg)
+Octree2BufBase<LeafContainerT, BranchContainerT>::createLeaf(uindex_t idx_x_arg,
+ uindex_t idx_y_arg,
+ uindex_t idx_z_arg)
{
// generate key
OctreeKey key(idx_x_arg, idx_y_arg, idx_z_arg);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
bool
-Octree2BufBase<LeafContainerT, BranchContainerT>::existLeaf(
- unsigned int idx_x_arg, unsigned int idx_y_arg, unsigned int idx_z_arg) const
+Octree2BufBase<LeafContainerT, BranchContainerT>::existLeaf(uindex_t idx_x_arg,
+ uindex_t idx_y_arg,
+ uindex_t idx_z_arg) const
{
// generate key
OctreeKey key(idx_x_arg, idx_y_arg, idx_z_arg);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
void
-Octree2BufBase<LeafContainerT, BranchContainerT>::removeLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg)
+Octree2BufBase<LeafContainerT, BranchContainerT>::removeLeaf(uindex_t idx_x_arg,
+ uindex_t idx_y_arg,
+ uindex_t idx_z_arg)
{
// generate key
OctreeKey key(idx_x_arg, idx_y_arg, idx_z_arg);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
-unsigned int
+uindex_t
Octree2BufBase<LeafContainerT, BranchContainerT>::createLeafRecursive(
const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg,
LeafNode*& return_leaf_arg,
BranchNode*& parent_of_leaf_arg,
OctreeNode* child_node = branch_arg->getChildPtr(!buffer_selector_, child_idx);
if (child_node->getNodeType() == LEAF_NODE) {
child_leaf = static_cast<LeafNode*>(child_node);
+ child_leaf->getContainer() = LeafContainer(); // Clear contents of leaf
branch_arg->setChildPtr(buffer_selector_, child_idx, child_node);
}
else {
void
Octree2BufBase<LeafContainerT, BranchContainerT>::findLeafRecursive(
const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg,
LeafContainerT*& result_arg) const
{
template <typename LeafContainerT, typename BranchContainerT>
bool
Octree2BufBase<LeafContainerT, BranchContainerT>::deleteLeafRecursive(
- const OctreeKey& key_arg, unsigned int depth_mask_arg, BranchNode* branch_arg)
+ const OctreeKey& key_arg, uindex_t depth_mask_arg, BranchNode* branch_arg)
{
// index to branch child
unsigned char child_idx;
bool do_XOR_encoding_arg,
bool new_leafs_filter_arg)
{
- // bit pattern
- char branch_bit_pattern_curr_buffer;
- char branch_bit_pattern_prev_buffer;
- char node_XOR_bit_pattern;
-
- // occupancy bit patterns of branch node (current and previous octree buffer)
- branch_bit_pattern_curr_buffer = getBranchBitPattern(*branch_arg, buffer_selector_);
- branch_bit_pattern_prev_buffer = getBranchBitPattern(*branch_arg, !buffer_selector_);
-
- // XOR of current and previous occupancy bit patterns
- node_XOR_bit_pattern =
- branch_bit_pattern_curr_buffer ^ branch_bit_pattern_prev_buffer;
-
if (binary_tree_out_arg) {
+ // occupancy bit patterns of branch node (current octree buffer)
+ const char branch_bit_pattern_curr_buffer =
+ getBranchBitPattern(*branch_arg, buffer_selector_);
if (do_XOR_encoding_arg) {
+ // occupancy bit patterns of branch node (previous octree buffer)
+ const char branch_bit_pattern_prev_buffer =
+ getBranchBitPattern(*branch_arg, !buffer_selector_);
+ // XOR of current and previous occupancy bit patterns
+ const char node_XOR_bit_pattern =
+ branch_bit_pattern_curr_buffer ^ branch_bit_pattern_prev_buffer;
// write XOR bit pattern to output vector
binary_tree_out_arg->push_back(node_XOR_bit_pattern);
}
void
Octree2BufBase<LeafContainerT, BranchContainerT>::deserializeTreeRecursive(
BranchNode* branch_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
OctreeKey& key_arg,
typename std::vector<char>::const_iterator& binaryTreeIT_arg,
typename std::vector<char>::const_iterator& binaryTreeIT_End_arg,
#ifndef PCL_OCTREE_BASE_HPP
#define PCL_OCTREE_BASE_HPP
-#include <pcl/impl/instantiate.hpp>
-
#include <vector>
namespace pcl {
template <typename LeafContainerT, typename BranchContainerT>
void
OctreeBase<LeafContainerT, BranchContainerT>::setMaxVoxelIndex(
- unsigned int max_voxel_index_arg)
+ uindex_t max_voxel_index_arg)
{
- unsigned int tree_depth;
+ uindex_t tree_depth;
assert(max_voxel_index_arg > 0);
// tree depth == bitlength of maxVoxels
tree_depth =
- std::min(static_cast<unsigned int>(OctreeKey::maxDepth),
- static_cast<unsigned int>(std::ceil(std::log2(max_voxel_index_arg))));
+ std::min(static_cast<uindex_t>(OctreeKey::maxDepth),
+ static_cast<uindex_t>(std::ceil(std::log2(max_voxel_index_arg))));
// define depthMask_ by setting a single bit to 1 at bit position == tree depth
depth_mask_ = (1 << (tree_depth - 1));
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
void
-OctreeBase<LeafContainerT, BranchContainerT>::setTreeDepth(unsigned int depth_arg)
+OctreeBase<LeafContainerT, BranchContainerT>::setTreeDepth(uindex_t depth_arg)
{
assert(depth_arg > 0);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
LeafContainerT*
-OctreeBase<LeafContainerT, BranchContainerT>::findLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg)
+OctreeBase<LeafContainerT, BranchContainerT>::findLeaf(uindex_t idx_x_arg,
+ uindex_t idx_y_arg,
+ uindex_t idx_z_arg)
{
// generate key
OctreeKey key(idx_x_arg, idx_y_arg, idx_z_arg);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
LeafContainerT*
-OctreeBase<LeafContainerT, BranchContainerT>::createLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg)
+OctreeBase<LeafContainerT, BranchContainerT>::createLeaf(uindex_t idx_x_arg,
+ uindex_t idx_y_arg,
+ uindex_t idx_z_arg)
{
// generate key
OctreeKey key(idx_x_arg, idx_y_arg, idx_z_arg);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
bool
-OctreeBase<LeafContainerT, BranchContainerT>::existLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg) const
+OctreeBase<LeafContainerT, BranchContainerT>::existLeaf(uindex_t idx_x_arg,
+ uindex_t idx_y_arg,
+ uindex_t idx_z_arg) const
{
// generate key
OctreeKey key(idx_x_arg, idx_y_arg, idx_z_arg);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
void
-OctreeBase<LeafContainerT, BranchContainerT>::removeLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg)
+OctreeBase<LeafContainerT, BranchContainerT>::removeLeaf(uindex_t idx_x_arg,
+ uindex_t idx_y_arg,
+ uindex_t idx_z_arg)
{
// generate key
OctreeKey key(idx_x_arg, idx_y_arg, idx_z_arg);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename LeafContainerT, typename BranchContainerT>
-unsigned int
+uindex_t
OctreeBase<LeafContainerT, BranchContainerT>::createLeafRecursive(
const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg,
LeafNode*& return_leaf_arg,
BranchNode*& parent_of_leaf_arg)
void
OctreeBase<LeafContainerT, BranchContainerT>::findLeafRecursive(
const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg,
LeafContainerT*& result_arg) const
{
template <typename LeafContainerT, typename BranchContainerT>
bool
OctreeBase<LeafContainerT, BranchContainerT>::deleteLeafRecursive(
- const OctreeKey& key_arg, unsigned int depth_mask_arg, BranchNode* branch_arg)
+ const OctreeKey& key_arg, uindex_t depth_mask_arg, BranchNode* branch_arg)
{
// index to branch child
unsigned char child_idx;
void
OctreeBase<LeafContainerT, BranchContainerT>::deserializeTreeRecursive(
BranchNode* branch_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
OctreeKey& key_arg,
typename std::vector<char>::const_iterator& binary_tree_input_it_arg,
typename std::vector<char>::const_iterator& binary_tree_input_it_end_arg,
namespace octree {
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
-OctreeDepthFirstIterator<OctreeT>::OctreeDepthFirstIterator(unsigned int max_depth_arg)
+OctreeDepthFirstIterator<OctreeT>::OctreeDepthFirstIterator(uindex_t max_depth_arg)
: OctreeIteratorBase<OctreeT>(max_depth_arg), stack_()
{
// initialize iterator
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
OctreeDepthFirstIterator<OctreeT>::OctreeDepthFirstIterator(OctreeT* octree_arg,
- unsigned int max_depth_arg)
+ uindex_t max_depth_arg)
: OctreeIteratorBase<OctreeT>(octree_arg, max_depth_arg), stack_()
{
// initialize iterator
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
-OctreeBreadthFirstIterator<OctreeT>::OctreeBreadthFirstIterator(
- unsigned int max_depth_arg)
+OctreeBreadthFirstIterator<OctreeT>::OctreeBreadthFirstIterator(uindex_t max_depth_arg)
: OctreeIteratorBase<OctreeT>(max_depth_arg), FIFO_()
{
OctreeIteratorBase<OctreeT>::reset();
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
-OctreeBreadthFirstIterator<OctreeT>::OctreeBreadthFirstIterator(
- OctreeT* octree_arg, unsigned int max_depth_arg)
+OctreeBreadthFirstIterator<OctreeT>::OctreeBreadthFirstIterator(OctreeT* octree_arg,
+ uindex_t max_depth_arg)
: OctreeIteratorBase<OctreeT>(octree_arg, max_depth_arg), FIFO_()
{
OctreeIteratorBase<OctreeT>::reset();
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
OctreeFixedDepthIterator<OctreeT>::OctreeFixedDepthIterator()
-: OctreeBreadthFirstIterator<OctreeT>(0u), fixed_depth_(0u)
+: OctreeBreadthFirstIterator<OctreeT>(nullptr, 0), fixed_depth_(0u)
{}
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
-OctreeFixedDepthIterator<OctreeT>::OctreeFixedDepthIterator(
- OctreeT* octree_arg, unsigned int fixed_depth_arg)
+OctreeFixedDepthIterator<OctreeT>::OctreeFixedDepthIterator(OctreeT* octree_arg,
+ uindex_t fixed_depth_arg)
: OctreeBreadthFirstIterator<OctreeT>(octree_arg, fixed_depth_arg)
, fixed_depth_(fixed_depth_arg)
{
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
void
-OctreeFixedDepthIterator<OctreeT>::reset(unsigned int fixed_depth_arg)
+OctreeFixedDepthIterator<OctreeT>::reset(uindex_t fixed_depth_arg)
{
// Set the desired depth to walk through
fixed_depth_ = fixed_depth_arg;
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
OctreeLeafNodeBreadthFirstIterator<OctreeT>::OctreeLeafNodeBreadthFirstIterator(
- unsigned int max_depth_arg)
+ uindex_t max_depth_arg)
: OctreeBreadthFirstIterator<OctreeT>(max_depth_arg)
{
reset();
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename OctreeT>
OctreeLeafNodeBreadthFirstIterator<OctreeT>::OctreeLeafNodeBreadthFirstIterator(
- OctreeT* octree_arg, unsigned int max_depth_arg)
+ OctreeT* octree_arg, uindex_t max_depth_arg)
: OctreeBreadthFirstIterator<OctreeT>(octree_arg, max_depth_arg)
{
reset();
template <typename OctreeT>
OctreeLeafNodeBreadthFirstIterator<OctreeT>::OctreeLeafNodeBreadthFirstIterator(
OctreeT* octree_arg,
- unsigned int max_depth_arg,
+ uindex_t max_depth_arg,
IteratorState* current_state,
const std::deque<IteratorState>& fifo)
: OctreeBreadthFirstIterator<OctreeT>(octree_arg, max_depth_arg, current_state, fifo)
#include <pcl/common/common.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
#include <pcl/octree/impl/octree_base.hpp>
+#include <pcl/types.h>
#include <cassert>
addPointsFromInputCloud()
{
if (indices_) {
- for (const int& index : *indices_) {
- assert((index >= 0) && (index < static_cast<int>(input_->size())));
+ for (const auto& index : *indices_) {
+ assert((index >= 0) && (static_cast<std::size_t>(index) < input_->size()));
if (isFinite((*input_)[index])) {
// add points to octree
}
}
else {
- for (std::size_t i = 0; i < input_->size(); i++) {
+ for (index_t i = 0; i < static_cast<index_t>(input_->size()); i++) {
if (isFinite((*input_)[i])) {
// add points to octree
- this->addPointIdx(static_cast<unsigned int>(i));
+ this->addPointIdx(i);
}
}
}
typename OctreeT>
void
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
- addPointFromCloud(const int point_idx_arg, IndicesPtr indices_arg)
+ addPointFromCloud(const uindex_t point_idx_arg, IndicesPtr indices_arg)
{
this->addPointIdx(point_idx_arg);
if (indices_arg)
cloud_arg->push_back(point_arg);
- this->addPointIdx(static_cast<const int>(cloud_arg->size()) - 1);
+ this->addPointIdx(cloud_arg->size() - 1);
}
//////////////////////////////////////////////////////////////////////////////////////////////
cloud_arg->push_back(point_arg);
- this->addPointFromCloud(static_cast<const int>(cloud_arg->size()) - 1, indices_arg);
+ this->addPointFromCloud(cloud_arg->size() - 1, indices_arg);
}
//////////////////////////////////////////////////////////////////////////////////////////////
typename OctreeT>
bool
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
- isVoxelOccupiedAtPoint(const int& point_idx_arg) const
+ isVoxelOccupiedAtPoint(const index_t& point_idx_arg) const
{
// retrieve point from input cloud
const PointT& point = (*this->input_)[point_idx_arg];
typename OctreeT>
void
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
- deleteVoxelAtPoint(const int& point_idx_arg)
+ deleteVoxelAtPoint(const index_t& point_idx_arg)
{
// retrieve point from input cloud
const PointT& point = (*this->input_)[point_idx_arg];
typename LeafContainerT,
typename BranchContainerT,
typename OctreeT>
-int
+pcl::uindex_t
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
getOccupiedVoxelCenters(AlignedPointTVector& voxel_center_list_arg) const
{
typename LeafContainerT,
typename BranchContainerT,
typename OctreeT>
-int
+pcl::uindex_t
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
getApproxIntersectedVoxelCentersBySegment(const Eigen::Vector3f& origin,
const Eigen::Vector3f& end,
const float step_size = static_cast<float>(resolution_) * precision;
// Ensure we get at least one step for the first voxel.
- const int nsteps = std::max(1, static_cast<int>(norm / step_size));
+ const auto nsteps = std::max<std::size_t>(1, norm / step_size);
OctreeKey prev_key;
bool bkeyDefined = false;
// Walk along the line segment with small steps.
- for (int i = 0; i < nsteps; ++i) {
+ for (std::size_t i = 0; i < nsteps; ++i) {
Eigen::Vector3f p = origin + (direction * step_size * static_cast<float>(i));
PointT octree_p;
voxel_center_list.push_back(center);
}
- return (static_cast<int>(voxel_center_list.size()));
+ return (static_cast<uindex_t>(voxel_center_list.size()));
}
//////////////////////////////////////////////////////////////////////////////////////////////
expandLeafNode(LeafNode* leaf_node,
BranchNode* parent_branch,
unsigned char child_idx,
- unsigned int depth_mask)
+ uindex_t depth_mask)
{
if (depth_mask) {
std::size_t leaf_obj_count = (*leaf_node)->getSize();
// copy leaf data
- std::vector<int> leafIndices;
+ Indices leafIndices;
leafIndices.reserve(leaf_obj_count);
(*leaf_node)->getPointIndices(leafIndices);
// add data to new branch
OctreeKey new_index_key;
- for (const int& leafIndex : leafIndices) {
+ for (const auto& leafIndex : leafIndices) {
const PointT& point_from_index = (*input_)[leafIndex];
// generate key
typename OctreeT>
void
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
- addPointIdx(const int point_idx_arg)
+ addPointIdx(const uindex_t point_idx_arg)
{
OctreeKey key;
- assert(point_idx_arg < static_cast<int>(input_->size()));
+ assert(point_idx_arg < input_->size());
const PointT& point = (*input_)[point_idx_arg];
LeafNode* leaf_node;
BranchNode* parent_branch_of_leaf_node;
- unsigned int depth_mask = this->createLeafRecursive(
+ auto depth_mask = this->createLeafRecursive(
key, this->depth_mask_, this->root_node_, leaf_node, parent_branch_of_leaf_node);
if (this->dynamic_depth_enabled_ && depth_mask) {
typename OctreeT>
const PointT&
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
- getPointByIndex(const unsigned int index_arg) const
+ getPointByIndex(const uindex_t index_arg) const
{
// retrieve point from input cloud
- assert(index_arg < static_cast<unsigned int>(input_->size()));
+ assert(index_arg < input_->size());
return ((*this->input_)[index_arg]);
}
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
getKeyBitSize()
{
- unsigned int max_voxels;
-
- unsigned int max_key_x;
- unsigned int max_key_y;
- unsigned int max_key_z;
-
- double octree_side_len;
-
const float minValue = std::numeric_limits<float>::epsilon();
// find maximum key values for x, y, z
- max_key_x =
- static_cast<unsigned int>(std::ceil((max_x_ - min_x_ - minValue) / resolution_));
- max_key_y =
- static_cast<unsigned int>(std::ceil((max_y_ - min_y_ - minValue) / resolution_));
- max_key_z =
- static_cast<unsigned int>(std::ceil((max_z_ - min_z_ - minValue) / resolution_));
+ const auto max_key_x =
+ static_cast<uindex_t>(std::ceil((max_x_ - min_x_ - minValue) / resolution_));
+ const auto max_key_y =
+ static_cast<uindex_t>(std::ceil((max_y_ - min_y_ - minValue) / resolution_));
+ const auto max_key_z =
+ static_cast<uindex_t>(std::ceil((max_z_ - min_z_ - minValue) / resolution_));
// find maximum amount of keys
- max_voxels = std::max(std::max(std::max(max_key_x, max_key_y), max_key_z),
- static_cast<unsigned int>(2));
+ const auto max_voxels =
+ std::max<uindex_t>(std::max(std::max(max_key_x, max_key_y), max_key_z), 2);
// tree depth == amount of bits of max_voxels
- this->octree_depth_ =
- std::max((std::min(static_cast<unsigned int>(OctreeKey::maxDepth),
- static_cast<unsigned int>(
- std::ceil(std::log2(max_voxels) - minValue)))),
- static_cast<unsigned int>(0));
+ this->octree_depth_ = std::max<uindex_t>(
+ std::min<uindex_t>(OctreeKey::maxDepth,
+ std::ceil(std::log2(max_voxels) - minValue)),
+ 0);
- octree_side_len = static_cast<double>(1 << this->octree_depth_) * resolution_;
+ const auto octree_side_len =
+ static_cast<double>(1 << this->octree_depth_) * resolution_;
if (this->leaf_count_ == 0) {
double octree_oversize_x;
genOctreeKeyforPoint(const PointT& point_arg, OctreeKey& key_arg) const
{
// calculate integer key for point coordinates
- key_arg.x =
- static_cast<unsigned int>((point_arg.x - this->min_x_) / this->resolution_);
- key_arg.y =
- static_cast<unsigned int>((point_arg.y - this->min_y_) / this->resolution_);
- key_arg.z =
- static_cast<unsigned int>((point_arg.z - this->min_z_) / this->resolution_);
+ key_arg.x = static_cast<uindex_t>((point_arg.x - this->min_x_) / this->resolution_);
+ key_arg.y = static_cast<uindex_t>((point_arg.y - this->min_y_) / this->resolution_);
+ key_arg.z = static_cast<uindex_t>((point_arg.z - this->min_z_) / this->resolution_);
assert(key_arg.x <= this->max_key_.x);
assert(key_arg.y <= this->max_key_.y);
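
// Standalone sketch (illustrative, not part of the patch) of the key arithmetic used
// above: each coordinate is shifted by the bounding-box minimum, divided by the voxel
// resolution and truncated to an unsigned voxel index. The helper name toVoxelIndex is
// hypothetical.
#include <pcl/types.h> // for pcl::uindex_t

inline pcl::uindex_t
toVoxelIndex(float coord, float min_coord, double resolution)
{
  return static_cast<pcl::uindex_t>((coord - min_coord) / resolution);
}
// Example: with min_x = 0.0 and resolution = 0.1, a point at x = 0.25 maps to key.x = 2.
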
typename OctreeT>
bool
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
- genOctreeKeyForDataT(const int& data_arg, OctreeKey& key_arg) const
+ genOctreeKeyForDataT(const index_t& data_arg, OctreeKey& key_arg) const
{
const PointT temp_point = getPointByIndex(data_arg);
void
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
genVoxelCenterFromOctreeKey(const OctreeKey& key_arg,
- unsigned int tree_depth_arg,
+ uindex_t tree_depth_arg,
PointT& point_arg) const
{
// generate point for voxel center defined by treedepth (bitLen) and key
void
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
genVoxelBoundsFromOctreeKey(const OctreeKey& key_arg,
- unsigned int tree_depth_arg,
+ uindex_t tree_depth_arg,
Eigen::Vector3f& min_pt,
Eigen::Vector3f& max_pt) const
{
typename OctreeT>
double
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
- getVoxelSquaredSideLen(unsigned int tree_depth_arg) const
+ getVoxelSquaredSideLen(uindex_t tree_depth_arg) const
{
double side_len;
typename OctreeT>
double
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
- getVoxelSquaredDiameter(unsigned int tree_depth_arg) const
+ getVoxelSquaredDiameter(uindex_t tree_depth_arg) const
{
  // return the squared diameter of the voxel cube (3 * squared side length)
  // as a function of the octree depth
return (getVoxelSquaredSideLen(tree_depth_arg) * 3);
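// Worked note (not part of the patch): the factor 3 is the cube-diagonal identity.
// For a voxel with side length s the space diagonal is s * sqrt(3), so the squared
// diameter is 3 * s^2; e.g. s = 0.1 gives side^2 = 0.01 and diameter^2 = 0.03.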
typename LeafContainerT,
typename BranchContainerT,
typename OctreeT>
-int
+pcl::uindex_t
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::
getOccupiedVoxelCentersRecursive(const BranchNode* node_arg,
const OctreeKey& key_arg,
AlignedPointTVector& voxel_center_list_arg) const
{
- int voxel_count = 0;
+ uindex_t voxel_count = 0;
// iterate over all children
for (unsigned char child_idx = 0; child_idx < 8; child_idx++) {
#pragma once
-#include <pcl/common/geometry.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
#include <pcl/console/print.h>
if (pcl::isFinite(temp)) // Make sure transformed point is finite - if it is not, it
// gets default key
{
- key_arg.x =
- static_cast<unsigned int>((temp.x - this->min_x_) / this->resolution_);
- key_arg.y =
- static_cast<unsigned int>((temp.y - this->min_y_) / this->resolution_);
- key_arg.z =
- static_cast<unsigned int>((temp.z - this->min_z_) / this->resolution_);
+ key_arg.x = static_cast<uindex_t>((temp.x - this->min_x_) / this->resolution_);
+ key_arg.y = static_cast<uindex_t>((temp.y - this->min_y_) / this->resolution_);
+ key_arg.z = static_cast<uindex_t>((temp.z - this->min_z_) / this->resolution_);
}
else {
key_arg = OctreeKey();
}
else {
// calculate integer key for point coordinates
- key_arg.x =
- static_cast<unsigned int>((point_arg.x - this->min_x_) / this->resolution_);
- key_arg.y =
- static_cast<unsigned int>((point_arg.y - this->min_y_) / this->resolution_);
- key_arg.z =
- static_cast<unsigned int>((point_arg.z - this->min_z_) / this->resolution_);
+ key_arg.x = static_cast<uindex_t>((point_arg.x - this->min_x_) / this->resolution_);
+ key_arg.y = static_cast<uindex_t>((point_arg.y - this->min_y_) / this->resolution_);
+ key_arg.z = static_cast<uindex_t>((point_arg.z - this->min_z_) / this->resolution_);
}
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
void
pcl::octree::OctreePointCloudAdjacency<PointT, LeafContainerT, BranchContainerT>::
- addPointIdx(const int pointIdx_arg)
+ addPointIdx(const uindex_t pointIdx_arg)
{
OctreeKey key;
- assert(pointIdx_arg < static_cast<int>(this->input_->size()));
+ assert(pointIdx_arg < this->input_->size());
const PointT& point = (*this->input_)[pointIdx_arg];
if (!pcl::isFinite(point))
direction.normalize();
float precision = 1.0f;
const float step_size = static_cast<const float>(resolution_) * precision;
- const int nsteps = std::max(1, static_cast<int>(norm / step_size));
+ const auto nsteps = std::max<std::size_t>(1, norm / step_size);
OctreeKey prev_key = key;
// Walk along the line segment with small steps.
Eigen::Vector3f p = leaf_centroid;
PointT octree_p;
- for (int i = 0; i < nsteps; ++i) {
+ for (std::size_t i = 0; i < nsteps; ++i) {
// Start at the leaf voxel, and move back towards sensor.
p += (direction * step_size);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-std::size_t
+pcl::uindex_t
pcl::octree::OctreePointCloudVoxelCentroid<PointT, LeafContainerT, BranchContainerT>::
getVoxelCentroids(
typename OctreePointCloud<PointT, LeafContainerT, BranchContainerT>::
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
bool
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::voxelSearch(
- const PointT& point, std::vector<int>& point_idx_data)
+ const PointT& point, Indices& point_idx_data)
{
assert(isFinite(point) &&
"Invalid (NaN, Inf) point coordinates given to nearestKSearch!");
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
bool
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::voxelSearch(
- const int index, std::vector<int>& point_idx_data)
+ const uindex_t index, Indices& point_idx_data)
{
const PointT search_point = this->getPointByIndex(index);
return (this->voxelSearch(search_point, point_idx_data));
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::nearestKSearch(
const PointT& p_q,
- int k,
- std::vector<int>& k_indices,
+ uindex_t k,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances)
{
assert(this->leaf_count_ > 0);
getKNearestNeighborRecursive(
p_q, k, this->root_node_, key, 1, smallest_dist, point_candidates);
- unsigned int result_count = static_cast<unsigned int>(point_candidates.size());
+ const auto result_count = static_cast<uindex_t>(point_candidates.size());
k_indices.resize(result_count);
k_sqr_distances.resize(result_count);
- for (unsigned int i = 0; i < result_count; ++i) {
+ for (uindex_t i = 0; i < result_count; ++i) {
k_indices[i] = point_candidates[i].point_idx_;
k_sqr_distances[i] = point_candidates[i].point_distance_;
}
- return static_cast<int>(k_indices.size());
+ return k_indices.size();
}
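
// Usage sketch (illustrative only, not part of the patch) of the migrated k-NN
// interface: indices are returned through pcl::Indices and the count as pcl::uindex_t
// instead of int. The function name knnExample and the 5 cm resolution are arbitrary.
#include <pcl/octree/octree_search.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <vector>

void
knnExample(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud, const pcl::PointXYZ& query)
{
  pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree(0.05); // 5 cm voxels
  octree.setInputCloud(cloud);
  octree.addPointsFromInputCloud();

  pcl::Indices k_indices;             // was std::vector<int>
  std::vector<float> k_sqr_distances;
  const pcl::uindex_t found =
      octree.nearestKSearch(query, 10, k_indices, k_sqr_distances);
  (void)found;
}
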
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::nearestKSearch(
- int index, int k, std::vector<int>& k_indices, std::vector<float>& k_sqr_distances)
+ uindex_t index, uindex_t k, Indices& k_indices, std::vector<float>& k_sqr_distances)
{
const PointT search_point = this->getPointByIndex(index);
return (nearestKSearch(search_point, k, k_indices, k_sqr_distances));
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
void
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::approxNearestSearch(
- const PointT& p_q, int& result_index, float& sqr_distance)
+ const PointT& p_q, index_t& result_index, float& sqr_distance)
{
assert(this->leaf_count_ > 0);
assert(isFinite(p_q) &&
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
void
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::approxNearestSearch(
- int query_index, int& result_index, float& sqr_distance)
+ uindex_t query_index, index_t& result_index, float& sqr_distance)
{
const PointT search_point = this->getPointByIndex(query_index);
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::radiusSearch(
const PointT& p_q,
const double radius,
- std::vector<int>& k_indices,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances,
- unsigned int max_nn) const
+ uindex_t max_nn) const
{
assert(isFinite(p_q) &&
"Invalid (NaN, Inf) point coordinates given to nearestKSearch!");
k_sqr_distances,
max_nn);
- return (static_cast<int>(k_indices.size()));
+ return k_indices.size();
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::radiusSearch(
- int index,
+ uindex_t index,
const double radius,
- std::vector<int>& k_indices,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances,
- unsigned int max_nn) const
+ uindex_t max_nn) const
{
const PointT search_point = this->getPointByIndex(index);
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::boxSearch(
const Eigen::Vector3f& min_pt,
const Eigen::Vector3f& max_pt,
- std::vector<int>& k_indices) const
+ Indices& k_indices) const
{
OctreeKey key;
boxSearchRecursive(min_pt, max_pt, this->root_node_, key, 1, k_indices);
- return (static_cast<int>(k_indices.size()));
+ return k_indices.size();
}
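
// Usage sketch (illustrative only, not part of the patch) for the radius and box
// queries after the change: both fill pcl::Indices and return pcl::uindex_t. The
// function name regionExample and the chosen radius/box extents are arbitrary.
#include <pcl/octree/octree_search.h>
#include <pcl/point_types.h>
#include <Eigen/Core>
#include <vector>

void
regionExample(pcl::octree::OctreePointCloudSearch<pcl::PointXYZ>& octree,
              const pcl::PointXYZ& query)
{
  pcl::Indices indices;
  std::vector<float> sqr_distances;
  const pcl::uindex_t in_radius = octree.radiusSearch(query, 0.2, indices, sqr_distances);

  indices.clear();
  const Eigen::Vector3f min_pt(0.0f, 0.0f, 0.0f);
  const Eigen::Vector3f max_pt(1.0f, 1.0f, 1.0f);
  const pcl::uindex_t in_box = octree.boxSearch(min_pt, max_pt, indices);
  (void)in_radius;
  (void)in_box;
}
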
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::
getKNearestNeighborRecursive(
const PointT& point,
- unsigned int K,
+ uindex_t K,
const BranchNode* node,
const OctreeKey& key,
- unsigned int tree_depth,
+ uindex_t tree_depth,
const double squared_search_radius,
std::vector<prioPointQueueEntry>& point_candidates) const
{
child_node = search_heap.back().node;
new_key = search_heap.back().key;
- if (tree_depth < this->octree_depth_) {
+ if (child_node->getNodeType() == BRANCH_NODE) {
// we have not reached maximum tree depth
smallest_squared_dist =
getKNearestNeighborRecursive(point,
}
else {
// we reached leaf node level
- std::vector<int> decoded_point_vector;
+ Indices decoded_point_vector;
const LeafNode* child_leaf = static_cast<const LeafNode*>(child_node);
(*child_leaf)->getPointIndices(decoded_point_vector);
// Linearly iterate over all decoded (unsorted) points
- for (const int& point_index : decoded_point_vector) {
+ for (const auto& point_index : decoded_point_vector) {
const PointT& candidate_point = this->getPointByIndex(point_index);
const double radiusSquared,
const BranchNode* node,
const OctreeKey& key,
- unsigned int tree_depth,
- std::vector<int>& k_indices,
+ uindex_t tree_depth,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances,
- unsigned int max_nn) const
+ uindex_t max_nn) const
{
// get spatial voxel information
double voxel_squared_diameter = this->getVoxelSquaredDiameter(tree_depth);
voxel_squared_diameter / 4.0 + radiusSquared +
sqrt(voxel_squared_diameter * radiusSquared)) {
- if (tree_depth < this->octree_depth_) {
+ if (child_node->getNodeType() == BRANCH_NODE) {
// we have not reached maximum tree depth
getNeighborsWithinRadiusRecursive(point,
radiusSquared,
k_indices,
k_sqr_distances,
max_nn);
- if (max_nn != 0 && k_indices.size() == static_cast<unsigned int>(max_nn))
+ if (max_nn != 0 && k_indices.size() == max_nn)
return;
}
else {
// we reached leaf node level
const LeafNode* child_leaf = static_cast<const LeafNode*>(child_node);
- std::vector<int> decoded_point_vector;
+ Indices decoded_point_vector;
// decode leaf node into decoded_point_vector
(*child_leaf)->getPointIndices(decoded_point_vector);
// Linearly iterate over all decoded (unsorted) points
- for (const int& index : decoded_point_vector) {
+ for (const auto& index : decoded_point_vector) {
const PointT& candidate_point = this->getPointByIndex(index);
// calculate point distance to search point
k_indices.push_back(index);
k_sqr_distances.push_back(squared_dist);
- if (max_nn != 0 && k_indices.size() == static_cast<unsigned int>(max_nn))
+ if (max_nn != 0 && k_indices.size() == max_nn)
return;
}
}
approxNearestSearchRecursive(const PointT& point,
const BranchNode* node,
const OctreeKey& key,
- unsigned int tree_depth,
- int& result_index,
+ uindex_t tree_depth,
+ index_t& result_index,
float& sqr_distance)
{
OctreeKey minChildKey;
child_node = this->getBranchChildPtr(*node, min_child_idx);
- if (tree_depth < this->octree_depth_) {
+ if (child_node->getNodeType() == BRANCH_NODE) {
// we have not reached maximum tree depth
approxNearestSearchRecursive(point,
static_cast<const BranchNode*>(child_node),
}
else {
// we reached leaf node level
- std::vector<int> decoded_point_vector;
+ Indices decoded_point_vector;
const LeafNode* child_leaf = static_cast<const LeafNode*>(child_node);
- double smallest_squared_dist = std::numeric_limits<double>::max();
+ float smallest_squared_dist = std::numeric_limits<float>::max();
// decode leaf node into decoded_point_vector
(**child_leaf).getPointIndices(decoded_point_vector);
// Linearly iterate over all decoded (unsorted) points
- for (const int& index : decoded_point_vector) {
+ for (const auto& index : decoded_point_vector) {
const PointT& candidate_point = this->getPointByIndex(index);
// calculate point distance to search point
- double squared_dist = pointSquaredDist(candidate_point, point);
+ float squared_dist = pointSquaredDist(candidate_point, point);
// check if a closer match is found
if (squared_dist >= smallest_squared_dist)
result_index = index;
smallest_squared_dist = squared_dist;
- sqr_distance = static_cast<float>(squared_dist);
+ sqr_distance = squared_dist;
}
}
}
const Eigen::Vector3f& max_pt,
const BranchNode* node,
const OctreeKey& key,
- unsigned int tree_depth,
- std::vector<int>& k_indices) const
+ uindex_t tree_depth,
+ Indices& k_indices) const
{
// iterate over all children
for (unsigned char child_idx = 0; child_idx < 8; child_idx++) {
(lower_voxel_corner(1) > max_pt(1)) || (min_pt(1) > upper_voxel_corner(1)) ||
(lower_voxel_corner(2) > max_pt(2)) || (min_pt(2) > upper_voxel_corner(2)))) {
- if (tree_depth < this->octree_depth_) {
+ if (child_node->getNodeType() == BRANCH_NODE) {
// we have not reached maximum tree depth
boxSearchRecursive(min_pt,
max_pt,
}
else {
// we reached leaf node level
- std::vector<int> decoded_point_vector;
+ Indices decoded_point_vector;
const LeafNode* child_leaf = static_cast<const LeafNode*>(child_node);
(**child_leaf).getPointIndices(decoded_point_vector);
// Linearly iterate over all decoded (unsorted) points
- for (const int& index : decoded_point_vector) {
+ for (const auto& index : decoded_point_vector) {
const PointT& candidate_point = this->getPointByIndex(index);
// check if point falls within search box
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::
getIntersectedVoxelCenters(Eigen::Vector3f origin,
Eigen::Vector3f direction,
AlignedPointTVector& voxel_center_list,
- int max_voxel_count) const
+ uindex_t max_voxel_count) const
{
OctreeKey key;
key.x = key.y = key.z = 0;
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::
getIntersectedVoxelIndices(Eigen::Vector3f origin,
Eigen::Vector3f direction,
- std::vector<int>& k_indices,
- int max_voxel_count) const
+ Indices& k_indices,
+ uindex_t max_voxel_count) const
{
OctreeKey key;
key.x = key.y = key.z = 0;
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::
getIntersectedVoxelCentersRecursive(double min_x,
double min_y,
const OctreeNode* node,
const OctreeKey& key,
AlignedPointTVector& voxel_center_list,
- int max_voxel_count) const
+ uindex_t max_voxel_count) const
{
if (max_x < 0.0 || max_y < 0.0 || max_z < 0.0)
return (0);
}
// Voxel intersection count for branches children
- int voxel_count = 0;
+ uindex_t voxel_count = 0;
// Voxel mid lines
double mid_x = 0.5 * (min_x + max_x);
double mid_z = 0.5 * (min_z + max_z);
// First voxel node ray will intersect
- int curr_node = getFirstIntersectedNode(min_x, min_y, min_z, mid_x, mid_y, mid_z);
+ auto curr_node = getFirstIntersectedNode(min_x, min_y, min_z, mid_x, mid_y, mid_z);
// Child index, node and key
unsigned char child_idx;
}
template <typename PointT, typename LeafContainerT, typename BranchContainerT>
-int
+uindex_t
OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::
getIntersectedVoxelIndicesRecursive(double min_x,
double min_y,
unsigned char a,
const OctreeNode* node,
const OctreeKey& key,
- std::vector<int>& k_indices,
- int max_voxel_count) const
+ Indices& k_indices,
+ uindex_t max_voxel_count) const
{
if (max_x < 0.0 || max_y < 0.0 || max_z < 0.0)
return (0);
}
// Voxel intersection count for branches children
- int voxel_count = 0;
+ uindex_t voxel_count = 0;
// Voxel mid lines
double mid_x = 0.5 * (min_x + max_x);
double mid_z = 0.5 * (min_z + max_z);
// First voxel node ray will intersect
- int curr_node = getFirstIntersectedNode(min_x, min_y, min_z, mid_x, mid_y, mid_z);
+ auto curr_node = getFirstIntersectedNode(min_x, min_y, min_z, mid_x, mid_y, mid_z);
// Child index, node and key
unsigned char child_idx;
* \ingroup octree
* \author Julius Kammerl (julius@kammerl.de)
*/
-template <typename LeafContainerT = int,
+template <typename LeafContainerT = index_t,
typename BranchContainerT = OctreeContainerEmpty>
class Octree2BufBase {
using Iterator = OctreeDepthFirstIterator<OctreeT>;
using ConstIterator = const OctreeDepthFirstIterator<OctreeT>;
Iterator
- begin(unsigned int max_depth_arg = 0)
+ begin(uindex_t max_depth_arg = 0)
{
return Iterator(this, max_depth_arg);
};
using LeafNodeIterator = OctreeLeafNodeDepthFirstIterator<OctreeT>;
using ConstLeafNodeIterator = const OctreeLeafNodeDepthFirstIterator<OctreeT>;
- PCL_DEPRECATED(1, 12, "use leaf_depth_begin() instead")
- LeafNodeIterator
- leaf_begin(unsigned int max_depth_arg = 0)
- {
- return LeafNodeIterator(this, max_depth_arg);
- };
-
- PCL_DEPRECATED(1, 12, "use leaf_depth_end() instead")
- const LeafNodeIterator
- leaf_end()
- {
- return LeafNodeIterator();
- };
-
  // The currently valid names
using LeafNodeDepthFirstIterator = OctreeLeafNodeDepthFirstIterator<OctreeT>;
using ConstLeafNodeDepthFirstIterator =
const OctreeLeafNodeDepthFirstIterator<OctreeT>;
LeafNodeDepthFirstIterator
- leaf_depth_begin(unsigned int max_depth_arg = 0)
+ leaf_depth_begin(uindex_t max_depth_arg = 0)
{
return LeafNodeDepthFirstIterator(this, max_depth_arg);
};
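
// Usage sketch (illustrative only, not part of the patch): with leaf_begin()/leaf_end()
// removed, leaf traversal uses the *_depth_* spellings, and the depth argument is now
// pcl::uindex_t. The helper countLeaves is hypothetical.
#include <cstddef>

template <typename OctreeT>
std::size_t
countLeaves(OctreeT& tree) // any octree exposing leaf_depth_begin()/leaf_depth_end()
{
  std::size_t n = 0;
  for (auto it = tree.leaf_depth_begin(); it != tree.leaf_depth_end(); ++it)
    ++n;
  return n;
}
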
using DepthFirstIterator = OctreeDepthFirstIterator<OctreeT>;
using ConstDepthFirstIterator = const OctreeDepthFirstIterator<OctreeT>;
DepthFirstIterator
- depth_begin(unsigned int maxDepth_arg = 0)
+ depth_begin(uindex_t maxDepth_arg = 0)
{
return DepthFirstIterator(this, maxDepth_arg);
};
using BreadthFirstIterator = OctreeBreadthFirstIterator<OctreeT>;
using ConstBreadthFirstIterator = const OctreeBreadthFirstIterator<OctreeT>;
BreadthFirstIterator
- breadth_begin(unsigned int max_depth_arg = 0)
+ breadth_begin(uindex_t max_depth_arg = 0)
{
return BreadthFirstIterator(this, max_depth_arg);
};
const OctreeLeafNodeBreadthFirstIterator<OctreeT>;
LeafNodeBreadthIterator
- leaf_breadth_begin(unsigned int max_depth_arg = 0u)
+ leaf_breadth_begin(uindex_t max_depth_arg = 0u)
{
return LeafNodeBreadthIterator(this,
max_depth_arg ? max_depth_arg : this->octree_depth_);
* \param max_voxel_index_arg: maximum amount of voxels per dimension
*/
void
- setMaxVoxelIndex(unsigned int max_voxel_index_arg);
+ setMaxVoxelIndex(uindex_t max_voxel_index_arg);
/** \brief Set the maximum depth of the octree.
* \param depth_arg: maximum depth of octree
*/
void
- setTreeDepth(unsigned int depth_arg);
+ setTreeDepth(uindex_t depth_arg);
/** \brief Get the maximum depth of the octree.
* \return depth_arg: maximum depth of octree
*/
- inline unsigned int
+ inline uindex_t
getTreeDepth() const
{
return this->octree_depth_;
* \return pointer to new leaf node container.
*/
LeafContainerT*
- createLeaf(unsigned int idx_x_arg, unsigned int idx_y_arg, unsigned int idx_z_arg);
+ createLeaf(uindex_t idx_x_arg, uindex_t idx_y_arg, uindex_t idx_z_arg);
/** \brief Find leaf node at (idx_x_arg, idx_y_arg, idx_z_arg).
* \note If leaf node already exist, this method returns the existing node
* \return pointer to leaf node container if found, null pointer otherwise.
*/
LeafContainerT*
- findLeaf(unsigned int idx_x_arg, unsigned int idx_y_arg, unsigned int idx_z_arg);
+ findLeaf(uindex_t idx_x_arg, uindex_t idx_y_arg, uindex_t idx_z_arg);
/** \brief Check for the existence of leaf node at (idx_x_arg, idx_y_arg, idx_z_arg).
* \param idx_x_arg: index of leaf node in the X axis.
* \return "true" if leaf node search is successful, otherwise it returns "false".
*/
bool
- existLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg) const;
+ existLeaf(uindex_t idx_x_arg, uindex_t idx_y_arg, uindex_t idx_z_arg) const;
/** \brief Remove leaf node at (idx_x_arg, idx_y_arg, idx_z_arg).
* \param idx_x_arg: index of leaf node in the X axis.
* \param idx_z_arg: index of leaf node in the Z axis.
*/
void
- removeLeaf(unsigned int idx_x_arg, unsigned int idx_y_arg, unsigned int idx_z_arg);
+ removeLeaf(uindex_t idx_x_arg, uindex_t idx_y_arg, uindex_t idx_z_arg);
/** \brief Return the amount of existing leafs in the octree.
* \return amount of registered leaf nodes.
* \param branch_reset_arg: Reset pointer array of current branch
* \return depth mask at which leaf node was created/found
**/
- unsigned int
+ uindex_t
createLeafRecursive(const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg,
LeafNode*& return_leaf_arg,
BranchNode*& parent_of_leaf_arg,
**/
void
findLeafRecursive(const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg,
LeafContainerT*& result_arg) const;
**/
bool
deleteLeafRecursive(const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg);
/** \brief Recursively explore the octree and output binary octree description
void
deserializeTreeRecursive(
BranchNode* branch_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
OctreeKey& key_arg,
typename std::vector<char>::const_iterator& binary_tree_in_it_arg,
typename std::vector<char>::const_iterator& binary_tree_in_it_end_arg,
void
treeCleanUpRecursive(BranchNode* branch_arg);
- /** \brief Helper function to calculate the binary logarithm
- * \param n_arg: some value
- * \return binary logarithm (log2) of argument n_arg
- */
- PCL_DEPRECATED(1, 12, "use std::log2 instead") inline double Log2(double n_arg)
- {
- return std::log2(n_arg);
- }
-
/** \brief Test if octree is able to dynamically change its depth. This is required
* for adaptive bounding box adjustment.
* \return "false" - not resizeable due to XOR serialization
BranchNode* root_node_;
/** \brief Depth mask based on octree depth **/
- unsigned int depth_mask_;
+ uindex_t depth_mask_;
/** \brief key range */
OctreeKey max_key_;
/** \brief Currently active octree buffer **/
unsigned char buffer_selector_;
- // flags indicating if unused branches and leafs might exist in previous buffer
+ /** \brief flags indicating if unused branches and leafs might exist in previous
+ * buffer **/
bool tree_dirty_flag_;
/** \brief Octree depth */
- unsigned int octree_depth_;
+ uindex_t octree_depth_;
/** \brief Enable dynamic_depth
* \note Note that this parameter is ignored in octree2buf! */
* \ingroup octree
* \author Julius Kammerl (julius@kammerl.de)
*/
-template <typename LeafContainerT = int,
+template <typename LeafContainerT = index_t,
typename BranchContainerT = OctreeContainerEmpty>
class OctreeBase {
public:
BranchNode* root_node_;
/** \brief Depth mask based on octree depth **/
- unsigned int depth_mask_;
+ uindex_t depth_mask_;
/** \brief Octree depth */
- unsigned int octree_depth_;
+ uindex_t octree_depth_;
/** \brief Enable dynamic_depth **/
bool dynamic_depth_enabled_;
using ConstIterator = const OctreeDepthFirstIterator<OctreeT>;
Iterator
- begin(unsigned int max_depth_arg = 0u)
+ begin(uindex_t max_depth_arg = 0u)
{
return Iterator(this, max_depth_arg ? max_depth_arg : this->octree_depth_);
};
using LeafNodeIterator = OctreeLeafNodeDepthFirstIterator<OctreeT>;
using ConstLeafNodeIterator = const OctreeLeafNodeDepthFirstIterator<OctreeT>;
- PCL_DEPRECATED(1, 12, "use leaf_depth_begin() instead")
- LeafNodeIterator
- leaf_begin(unsigned int max_depth_arg = 0u)
- {
- return LeafNodeIterator(this, max_depth_arg ? max_depth_arg : this->octree_depth_);
- };
-
- PCL_DEPRECATED(1, 12, "use leaf_depth_end() instead")
- const LeafNodeIterator
- leaf_end()
- {
- return LeafNodeIterator(this, 0, nullptr);
- };
-
  // The currently valid names
using LeafNodeDepthFirstIterator = OctreeLeafNodeDepthFirstIterator<OctreeT>;
using ConstLeafNodeDepthFirstIterator =
const OctreeLeafNodeDepthFirstIterator<OctreeT>;
LeafNodeDepthFirstIterator
- leaf_depth_begin(unsigned int max_depth_arg = 0u)
+ leaf_depth_begin(uindex_t max_depth_arg = 0u)
{
return LeafNodeDepthFirstIterator(
this, max_depth_arg ? max_depth_arg : this->octree_depth_);
using ConstDepthFirstIterator = const OctreeDepthFirstIterator<OctreeT>;
DepthFirstIterator
- depth_begin(unsigned int max_depth_arg = 0u)
+ depth_begin(uindex_t max_depth_arg = 0u)
{
return DepthFirstIterator(this,
max_depth_arg ? max_depth_arg : this->octree_depth_);
using ConstBreadthFirstIterator = const OctreeBreadthFirstIterator<OctreeT>;
BreadthFirstIterator
- breadth_begin(unsigned int max_depth_arg = 0u)
+ breadth_begin(uindex_t max_depth_arg = 0u)
{
return BreadthFirstIterator(this,
max_depth_arg ? max_depth_arg : this->octree_depth_);
using ConstFixedDepthIterator = const OctreeFixedDepthIterator<OctreeT>;
FixedDepthIterator
- fixed_depth_begin(unsigned int fixed_depth_arg = 0u)
+ fixed_depth_begin(uindex_t fixed_depth_arg = 0u)
{
return FixedDepthIterator(this, fixed_depth_arg);
};
const OctreeLeafNodeBreadthFirstIterator<OctreeT>;
LeafNodeBreadthFirstIterator
- leaf_breadth_begin(unsigned int max_depth_arg = 0u)
+ leaf_breadth_begin(uindex_t max_depth_arg = 0u)
{
return LeafNodeBreadthFirstIterator(
this, max_depth_arg ? max_depth_arg : this->octree_depth_);
* \param[in] max_voxel_index_arg maximum amount of voxels per dimension
*/
void
- setMaxVoxelIndex(unsigned int max_voxel_index_arg);
+ setMaxVoxelIndex(uindex_t max_voxel_index_arg);
/** \brief Set the maximum depth of the octree.
* \param max_depth_arg: maximum depth of octree
*/
void
- setTreeDepth(unsigned int max_depth_arg);
+ setTreeDepth(uindex_t max_depth_arg);
/** \brief Get the maximum depth of the octree.
* \return depth_arg: maximum depth of octree
*/
- unsigned int
+ uindex_t
getTreeDepth() const
{
return this->octree_depth_;
* \return pointer to new leaf node container.
*/
LeafContainerT*
- createLeaf(unsigned int idx_x_arg, unsigned int idx_y_arg, unsigned int idx_z_arg);
+ createLeaf(uindex_t idx_x_arg, uindex_t idx_y_arg, uindex_t idx_z_arg);
/** \brief Find leaf node at (idx_x_arg, idx_y_arg, idx_z_arg).
* \note If leaf node already exist, this method returns the existing node
* \return pointer to leaf node container if found, null pointer otherwise.
*/
LeafContainerT*
- findLeaf(unsigned int idx_x_arg, unsigned int idx_y_arg, unsigned int idx_z_arg);
+ findLeaf(uindex_t idx_x_arg, uindex_t idx_y_arg, uindex_t idx_z_arg);
  /** \brief Check for the existence of leaf node at (idx_x_arg, idx_y_arg,
* idx_z_arg).
* \return "true" if leaf node search is successful, otherwise it returns "false".
*/
bool
- existLeaf(unsigned int idx_x_arg,
- unsigned int idx_y_arg,
- unsigned int idx_z_arg) const;
+ existLeaf(uindex_t idx_x_arg, uindex_t idx_y_arg, uindex_t idx_z_arg) const;
/** \brief Remove leaf node at (idx_x_arg, idx_y_arg, idx_z_arg).
* \param idx_x_arg: index of leaf node in the X axis.
* \param idx_z_arg: index of leaf node in the Z axis.
*/
void
- removeLeaf(unsigned int idx_x_arg, unsigned int idx_y_arg, unsigned int idx_z_arg);
+ removeLeaf(uindex_t idx_x_arg, uindex_t idx_y_arg, uindex_t idx_z_arg);
/** \brief Return the amount of existing leafs in the octree.
* \return amount of registered leaf nodes.
* \param parent_of_leaf_arg: return pointer to parent of leaf node
* \return depth mask at which leaf node was created
**/
- unsigned int
+ uindex_t
createLeafRecursive(const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg,
LeafNode*& return_leaf_arg,
BranchNode*& parent_of_leaf_arg);
**/
void
findLeafRecursive(const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg,
LeafContainerT*& result_arg) const;
**/
bool
deleteLeafRecursive(const OctreeKey& key_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
BranchNode* branch_arg);
/** \brief Recursively explore the octree and output binary octree description
void
deserializeTreeRecursive(
BranchNode* branch_arg,
- unsigned int depth_mask_arg,
+ uindex_t depth_mask_arg,
OctreeKey& key_arg,
typename std::vector<char>::const_iterator& binary_tree_input_it_arg,
typename std::vector<char>::const_iterator& binary_tree_input_it_end_arg,
// Helpers
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- /** \brief Helper function to calculate the binary logarithm
- * \param n_arg: some value
- * \return binary logarithm (log2) of argument n_arg
- */
- PCL_DEPRECATED(1, 12, "use std::log2 instead") double Log2(double n_arg)
- {
- return std::log2(n_arg);
- }
-
/** \brief Test if octree is able to dynamically change its depth. This is required
*for adaptive bounding box adjustment.
* \return "true"
#pragma once
+#include <pcl/types.h>
+
#include <cassert>
#include <cstddef>
#include <vector>
/** \brief Pure abstract method to get size of container (number of indices)
* \return number of points/indices stored in leaf node container.
*/
- virtual std::size_t
+ virtual uindex_t
getSize() const
{
return 0u;
* indices.
*/
void
- addPointIndex(const int&)
+ addPointIndex(const index_t&)
{}
/** \brief Empty getPointIndex implementation as this leaf node does not store any
* point indices.
*/
void
- getPointIndex(int&) const
+ getPointIndex(index_t&) const
{}
/** \brief Empty getPointIndices implementation as this leaf node does not store any
* data. \
*/
void
- getPointIndices(std::vector<int>&) const
+ getPointIndices(Indices&) const
{}
};
/** \brief Abstract get size of container (number of DataT objects)
* \return number of DataT elements in leaf node container.
*/
- std::size_t
+ uindex_t
getSize() const override
{
return 0;
/** \brief Empty addPointIndex implementation. This leaf node does not store any point
* indices.
*/
- void
- addPointIndex(int)
- {}
+ void addPointIndex(index_t) {}
/** \brief Empty getPointIndex implementation as this leaf node does not store any
* point indices.
*/
- int
+ index_t
getPointIndex() const
{
assert("getPointIndex: undefined point index");
* data.
*/
void
- getPointIndices(std::vector<int>&) const
+ getPointIndices(Indices&) const
{}
};
* \param[in] data_arg index to be stored within leaf node.
*/
void
- addPointIndex(int data_arg)
+ addPointIndex(index_t data_arg)
{
data_ = data_arg;
}
* point index
* \return index stored within container.
*/
- int
+ index_t
getPointIndex() const
{
return data_;
* data vector
*/
void
- getPointIndices(std::vector<int>& data_vector_arg) const
+ getPointIndices(Indices& data_vector_arg) const
{
- if (data_ >= 0)
+ if (data_ != static_cast<index_t>(-1))
data_vector_arg.push_back(data_);
}
/** \brief Get size of container (number of DataT objects)
* \return number of DataT elements in leaf node container.
*/
- std::size_t
+ uindex_t
getSize() const override
{
- return data_ < 0 ? 0 : 1;
+ return data_ == static_cast<index_t>(-1) ? 0 : 1;
}
/** \brief Reset leaf node memory to zero. */
void
reset() override
{
- data_ = -1;
+ data_ = static_cast<index_t>(-1);
}
protected:
/** \brief Point index stored in octree. */
- int data_;
+ index_t data_;
};
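
// Behaviour sketch (illustrative only, not part of the patch): the container now holds
// a pcl::index_t and uses index_t(-1) as its "empty" sentinel, so getSize() reports 0
// until an index is added and again after reset(). The function name containerExample
// is hypothetical.
#include <pcl/octree/octree_container.h>
#include <cassert>

void
containerExample()
{
  pcl::octree::OctreeContainerPointIndex container;
  container.reset();                       // ensure the index_t(-1) "empty" sentinel
  assert(container.getSize() == 0);
  container.addPointIndex(42);
  assert(container.getSize() == 1);
  assert(container.getPointIndex() == 42);
}
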
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
* \param[in] data_arg index to be stored within leaf node.
*/
void
- addPointIndex(int data_arg)
+ addPointIndex(index_t data_arg)
{
leafDataTVector_.push_back(data_arg);
}
* point indices.
* \return index stored within container.
*/
- int
+ index_t
getPointIndex() const
{
return leafDataTVector_.back();
* within data vector
*/
void
- getPointIndices(std::vector<int>& data_vector_arg) const
+ getPointIndices(Indices& data_vector_arg) const
{
data_vector_arg.insert(
data_vector_arg.end(), leafDataTVector_.begin(), leafDataTVector_.end());
* of point indices.
* \return reference to vector of point indices to be stored within data vector
*/
- std::vector<int>&
+ Indices&
getPointIndicesVector()
{
return leafDataTVector_;
/** \brief Get size of container (number of indices)
* \return number of point indices in container.
*/
- std::size_t
+ uindex_t
getSize() const override
{
- return leafDataTVector_.size();
+ return static_cast<uindex_t>(leafDataTVector_.size());
}
/** \brief Reset leaf node. Clear DataT vector.*/
protected:
/** \brief Leaf node DataT vector. */
- std::vector<int> leafDataTVector_;
+ Indices leafDataTVector_;
};
} // namespace octree
struct IteratorState {
OctreeNode* node_;
OctreeKey key_;
- unsigned int depth_;
+ uindex_t depth_;
};
/** \brief @b Abstract octree iterator class
/** \brief Empty constructor.
*/
- explicit OctreeIteratorBase(unsigned int max_depth_arg = 0)
- : octree_(0), current_state_(0), max_octree_depth_(max_depth_arg)
+ OctreeIteratorBase() : OctreeIteratorBase(nullptr, 0u) {}
+
+ /** \brief Constructor.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ */
+ explicit OctreeIteratorBase(uindex_t max_depth_arg)
+ : octree_(nullptr), current_state_(nullptr), max_octree_depth_(max_depth_arg)
{
this->reset();
}
* root node.
* \param[in] max_depth_arg Depth limitation during traversal
*/
- explicit OctreeIteratorBase(OctreeT* octree_arg, unsigned int max_depth_arg = 0)
+  explicit OctreeIteratorBase(OctreeT* octree_arg) : OctreeIteratorBase(octree_arg, 0u)
+  {}
+
+ /** \brief Constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its
+ * root node.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ */
+ explicit OctreeIteratorBase(OctreeT* octree_arg, uindex_t max_depth_arg)
: octree_(octree_arg), current_state_(0), max_octree_depth_(max_depth_arg)
{
this->reset();
* \warning For advanced users only.
*/
explicit OctreeIteratorBase(OctreeT* octree_arg,
- unsigned int max_depth_arg,
+ uindex_t max_depth_arg,
IteratorState* current_state)
: octree_(octree_arg), current_state_(current_state), max_octree_depth_(max_depth_arg)
{}
/** \brief Get the current depth level of octree
* \return depth level
*/
- inline unsigned int
+ inline uindex_t
getCurrentOctreeDepth() const
{
assert(octree_ != 0);
if (current_state_) {
const OctreeKey& key = getCurrentOctreeKey();
// calculate integer id with respect to octree key
- unsigned int depth = octree_->getTreeDepth();
+ uindex_t depth = octree_->getTreeDepth();
id = static_cast<unsigned long>(key.x) << (depth * 2) |
static_cast<unsigned long>(key.y) << (depth * 1) |
static_cast<unsigned long>(key.z) << (depth * 0);
IteratorState* current_state_;
/** \brief Maximum octree depth */
- unsigned int max_octree_depth_;
+ uindex_t max_octree_depth_;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Empty constructor.
* \param[in] max_depth_arg Depth limitation during traversal
*/
- explicit OctreeDepthFirstIterator(unsigned int max_depth_arg = 0);
+ explicit OctreeDepthFirstIterator(uindex_t max_depth_arg = 0);
/** \brief Constructor.
* \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its
* root node.
* \param[in] max_depth_arg Depth limitation during traversal
*/
- explicit OctreeDepthFirstIterator(OctreeT* octree_arg,
- unsigned int max_depth_arg = 0);
+ explicit OctreeDepthFirstIterator(OctreeT* octree_arg, uindex_t max_depth_arg = 0);
/** \brief Constructor.
* \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its
*/
explicit OctreeDepthFirstIterator(
OctreeT* octree_arg,
- unsigned int max_depth_arg,
+ uindex_t max_depth_arg,
IteratorState* current_state,
const std::vector<IteratorState>& stack = std::vector<IteratorState>())
: OctreeIteratorBase<OctreeT>(octree_arg, max_depth_arg, current_state), stack_(stack)
/** \brief Empty constructor.
* \param[in] max_depth_arg Depth limitation during traversal
*/
- explicit OctreeBreadthFirstIterator(unsigned int max_depth_arg = 0);
+ explicit OctreeBreadthFirstIterator(uindex_t max_depth_arg = 0);
/** \brief Constructor.
* \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its
* root node.
* \param[in] max_depth_arg Depth limitation during traversal
*/
- explicit OctreeBreadthFirstIterator(OctreeT* octree_arg,
- unsigned int max_depth_arg = 0);
+ explicit OctreeBreadthFirstIterator(OctreeT* octree_arg, uindex_t max_depth_arg = 0);
/** \brief Constructor.
* \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its
*/
explicit OctreeBreadthFirstIterator(
OctreeT* octree_arg,
- unsigned int max_depth_arg,
+ uindex_t max_depth_arg,
IteratorState* current_state,
const std::deque<IteratorState>& fifo = std::deque<IteratorState>())
: OctreeIteratorBase<OctreeT>(octree_arg, max_depth_arg, current_state), FIFO_(fifo)
* root node.
* \param[in] fixed_depth_arg Depth level during traversal
*/
- explicit OctreeFixedDepthIterator(OctreeT* octree_arg,
- unsigned int fixed_depth_arg = 0);
+ explicit OctreeFixedDepthIterator(OctreeT* octree_arg, uindex_t fixed_depth_arg = 0);
/** \brief Constructor.
* \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its
*/
OctreeFixedDepthIterator(
OctreeT* octree_arg,
- unsigned int fixed_depth_arg,
+ uindex_t fixed_depth_arg,
IteratorState* current_state,
const std::deque<IteratorState>& fifo = std::deque<IteratorState>())
: OctreeBreadthFirstIterator<OctreeT>(
* \param[in] fixed_depth_arg Depth level during traversal
*/
void
- reset(unsigned int fixed_depth_arg);
+ reset(uindex_t fixed_depth_arg);
/** \brief Reset the iterator to the first node at the current depth
*/
using OctreeBreadthFirstIterator<OctreeT>::FIFO_;
/** \brief Given level of the node to be iterated */
- unsigned int fixed_depth_;
+ uindex_t fixed_depth_;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Empty constructor.
* \param[in] max_depth_arg Depth limitation during traversal
*/
- explicit OctreeLeafNodeDepthFirstIterator(unsigned int max_depth_arg = 0)
+ explicit OctreeLeafNodeDepthFirstIterator(uindex_t max_depth_arg = 0)
: OctreeDepthFirstIterator<OctreeT>(max_depth_arg)
{
reset();
* \param[in] max_depth_arg Depth limitation during traversal
*/
explicit OctreeLeafNodeDepthFirstIterator(OctreeT* octree_arg,
- unsigned int max_depth_arg = 0)
+ uindex_t max_depth_arg = 0)
: OctreeDepthFirstIterator<OctreeT>(octree_arg, max_depth_arg)
{
reset();
*/
explicit OctreeLeafNodeDepthFirstIterator(
OctreeT* octree_arg,
- unsigned int max_depth_arg,
+ uindex_t max_depth_arg,
IteratorState* current_state,
const std::vector<IteratorState>& stack = std::vector<IteratorState>())
: OctreeDepthFirstIterator<OctreeT>(octree_arg, max_depth_arg, current_state, stack)
/** \brief Empty constructor.
* \param[in] max_depth_arg Depth limitation during traversal
*/
- explicit OctreeLeafNodeBreadthFirstIterator(unsigned int max_depth_arg = 0);
+ explicit OctreeLeafNodeBreadthFirstIterator(uindex_t max_depth_arg = 0);
/** \brief Constructor.
* \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its
* \param[in] max_depth_arg Depth limitation during traversal
*/
explicit OctreeLeafNodeBreadthFirstIterator(OctreeT* octree_arg,
- unsigned int max_depth_arg = 0);
+ uindex_t max_depth_arg = 0);
/** \brief Copy constructor.
* \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its
*/
explicit OctreeLeafNodeBreadthFirstIterator(
OctreeT* octree_arg,
- unsigned int max_depth_arg,
+ uindex_t max_depth_arg,
IteratorState* current_state,
const std::deque<IteratorState>& fifo = std::deque<IteratorState>());
OctreeKey() : x(0), y(0), z(0) {}
/** \brief Constructor for key initialization. */
- OctreeKey(unsigned int keyX, unsigned int keyY, unsigned int keyZ)
- : x(keyX), y(keyY), z(keyZ)
- {}
+ OctreeKey(uindex_t keyX, uindex_t keyY, uindex_t keyZ) : x(keyX), y(keyY), z(keyZ) {}
/** \brief Copy constructor. */
OctreeKey(const OctreeKey& source) { std::memcpy(key_, source.key_, sizeof(key_)); }
* \return child node index
* */
inline unsigned char
- getChildIdxWithDepthMask(unsigned int depthMask) const
+ getChildIdxWithDepthMask(uindex_t depthMask) const
{
return static_cast<unsigned char>(((!!(this->x & depthMask)) << 2) |
((!!(this->y & depthMask)) << 1) |
/* \brief maximum depth that can be addressed */
static const unsigned char maxDepth =
- static_cast<unsigned char>(sizeof(std::uint32_t) * 8);
+ static_cast<unsigned char>(sizeof(uindex_t) * 8);
// Indices addressing a voxel at (X, Y, Z)
union {
struct {
- std::uint32_t x;
- std::uint32_t y;
- std::uint32_t z;
+ uindex_t x;
+ uindex_t y;
+ uindex_t z;
};
- std::uint32_t key_[3];
+ uindex_t key_[3];
};
};
} // namespace octree
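
// Usage sketch (illustrative only, not part of the patch): key coordinates are now
// pcl::uindex_t; getChildIdxWithDepthMask() packs one bit per axis into a child index
// in [0, 7] for the branch level selected by the depth mask. The function name
// keyExample is hypothetical.
#include <pcl/octree/octree_key.h>

void
keyExample()
{
  const pcl::octree::OctreeKey key(5u, 2u, 7u);
  // depth mask 0b100: x bit = 1, y bit = 0, z bit = 1 -> child index 0b101 = 5
  const unsigned char child = key.getChildIdxWithDepthMask(4u);
  (void)child;
}
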
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <Eigen/Core>
-
#include <cassert>
-#include <cstddef>
namespace pcl {
namespace octree {
OctreePointCloud(const double resolution_arg);
// public typedefs
- using IndicesPtr = shared_ptr<std::vector<int>>;
- using IndicesConstPtr = shared_ptr<const std::vector<int>>;
+ using IndicesPtr = shared_ptr<Indices>;
+ using IndicesConstPtr = shared_ptr<const Indices>;
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudPtr = typename PointCloud::Ptr;
/** \brief Get the maximum depth of the octree.
* \return depth_arg: maximum depth of octree
* */
- inline unsigned int
+ inline uindex_t
getTreeDepth() const
{
return this->octree_depth_;
* setInputCloud)
*/
void
- addPointFromCloud(const int point_idx_arg, IndicesPtr indices_arg);
+ addPointFromCloud(uindex_t point_idx_arg, IndicesPtr indices_arg);
/** \brief Add point simultaneously to octree and input point cloud.
* \param[in] point_arg point to be added
* \return "true" if voxel exist; "false" otherwise
*/
bool
- isVoxelOccupiedAtPoint(const int& point_idx_arg) const;
+ isVoxelOccupiedAtPoint(const index_t& point_idx_arg) const;
/** \brief Get a PointT vector of centers of all occupied voxels.
* \param[out] voxel_center_list_arg results are written to this vector of PointT
* elements
* \return number of occupied voxels
*/
- int
+ uindex_t
getOccupiedVoxelCenters(AlignedPointTVector& voxel_center_list_arg) const;
/** \brief Get a PointT vector of centers of voxels intersected by a line segment.
* octree_resolution x precision
* \return number of intersected voxels
*/
- int
+ uindex_t
getApproxIntersectedVoxelCentersBySegment(const Eigen::Vector3f& origin,
const Eigen::Vector3f& end,
AlignedPointTVector& voxel_center_list,
* \param[in] point_idx_arg index of point addressing the voxel to be deleted.
*/
void
- deleteVoxelAtPoint(const int& point_idx_arg);
+ deleteVoxelAtPoint(const index_t& point_idx_arg);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bounding box methods
* \return squared diameter
*/
double
- getVoxelSquaredDiameter(unsigned int tree_depth_arg) const;
+ getVoxelSquaredDiameter(uindex_t tree_depth_arg) const;
/** \brief Calculates the squared diameter of a voxel at leaf depth
* \return squared diameter
* \return squared voxel cube side length
*/
double
- getVoxelSquaredSideLen(unsigned int tree_depth_arg) const;
+ getVoxelSquaredSideLen(uindex_t tree_depth_arg) const;
/** \brief Calculates the squared voxel cube side length at leaf level
* \return squared voxel cube side length
* \a setInputCloud to be added
*/
virtual void
- addPointIdx(const int point_idx_arg);
+ addPointIdx(uindex_t point_idx_arg);
/** \brief Add point at index from input pointcloud dataset to octree
* \param[in] leaf_node to be expanded
expandLeafNode(LeafNode* leaf_node,
BranchNode* parent_branch,
unsigned char child_idx,
- unsigned int depth_mask);
+ uindex_t depth_mask);
/** \brief Get point at index from input pointcloud dataset
* \param[in] index_arg index representing the point in the dataset given by \a
* \return PointT from input pointcloud dataset
*/
const PointT&
- getPointByIndex(const unsigned int index_arg) const;
+ getPointByIndex(uindex_t index_arg) const;
/** \brief Find octree leaf node at a given point
* \param[in] point_arg query point
* are assignable
*/
virtual bool
- genOctreeKeyForDataT(const int& data_arg, OctreeKey& key_arg) const;
+ genOctreeKeyForDataT(const index_t& data_arg, OctreeKey& key_arg) const;
/** \brief Generate a point at center of leaf node voxel
* \param[in] key_arg octree key addressing a leaf node.
*/
void
genVoxelCenterFromOctreeKey(const OctreeKey& key_arg,
- unsigned int tree_depth_arg,
+ uindex_t tree_depth_arg,
PointT& point_arg) const;
/** \brief Generate bounds of an octree voxel using octree key and tree depth
*/
void
genVoxelBoundsFromOctreeKey(const OctreeKey& key_arg,
- unsigned int tree_depth_arg,
+ uindex_t tree_depth_arg,
Eigen::Vector3f& min_pt,
Eigen::Vector3f& max_pt) const;
* elements
* \return number of voxels found
*/
- int
+ uindex_t
getOccupiedVoxelCentersRecursive(const BranchNode* node_arg,
const OctreeKey& key_arg,
AlignedPointTVector& voxel_center_list_arg) const;
#pragma once
-#include <pcl/octree/boost.h>
#include <pcl/octree/octree_pointcloud.h>
#include <pcl/octree/octree_pointcloud_adjacency_container.h>
-#include <list>
-#include <set>
+#include <boost/graph/adjacency_list.hpp> // for adjacency_list
namespace pcl {
* \note This virtual implementation allows the use of a transform function to compute
* keys. */
void
- addPointIdx(const int point_idx_arg) override;
+ addPointIdx(uindex_t point_idx_arg) override;
/** \brief Fills in the neighbors fields for new voxels.
*
#pragma once
+#include <list> // for std::list
+
namespace pcl {
namespace octree {
}
/** \brief Gets the number of points contributing to this leaf */
- int
+ uindex_t
getPointCounter() const
{
return num_points_;
/** \brief virtual method to get size of container
* \return number of points added to leaf node container.
*/
- std::size_t
+ uindex_t
getSize() const override
{
return num_points_;
/** \brief Sets the number of points contributing to this leaf */
void
- setPointCounter(int points_arg)
+ setPointCounter(uindex_t points_arg)
{
num_points_ = points_arg;
}
}
private:
- int num_points_;
+ uindex_t num_points_;
NeighborListT neighbors_;
DataT data_;
};
* \return number of point indices
*/
std::size_t
- getPointIndicesFromNewVoxels(std::vector<int>& indicesVector_arg,
- const int minPointsPerLeaf_arg = 0)
+ getPointIndicesFromNewVoxels(Indices& indicesVector_arg,
+ const uindex_t minPointsPerLeaf_arg = 0)
{
std::vector<OctreeContainerPointIndices*> leaf_containers;
this->serializeNewLeafs(leaf_containers);
for (const auto& leaf_container : leaf_containers) {
- if (static_cast<int>(leaf_container->getSize()) >= minPointsPerLeaf_arg)
+ if (static_cast<uindex_t>(leaf_container->getSize()) >= minPointsPerLeaf_arg)
leaf_container->getPointIndices(indicesVector_arg);
}
/** \brief Read input data. Only an internal counter is increased.
*/
- void
- addPointIndex(int)
- {
- point_counter_++;
- }
+ void addPointIndex(uindex_t) { point_counter_++; }
/** \brief Return point counter.
* \return Amount of points
*/
- unsigned int
+ uindex_t
getPointCounter()
{
return (point_counter_);
}
private:
- unsigned int point_counter_;
+ uindex_t point_counter_;
};
/** \brief @b Octree pointcloud density class
* \param[in] point_arg: a point addressing a voxel \return amount of points
* that fall within leaf node voxel
*/
- unsigned int
+ uindex_t
getVoxelDensityAtPoint(const PointT& point_arg) const
{
- unsigned int point_count = 0;
+ uindex_t point_count = 0;
OctreePointCloudDensityContainer* leaf = this->findLeafAtPoint(point_arg);
} // namespace octree
} // namespace pcl
+// needed since OctreePointCloud is not instantiated with template parameters used above
+#include <pcl/octree/impl/octree_pointcloud.hpp>
+
#define PCL_INSTANTIATE_OctreePointCloudDensity(T) \
template class PCL_EXPORTS pcl::octree::OctreePointCloudDensity<T>;
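
// Usage sketch (illustrative only, not part of the patch): the density octree now
// reports per-voxel point counts as pcl::uindex_t. The function name densityExample
// and the 10 cm resolution are arbitrary.
#include <pcl/octree/octree_pointcloud_density.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void
densityExample(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud)
{
  pcl::octree::OctreePointCloudDensity<pcl::PointXYZ> octree(0.1); // 10 cm voxels
  octree.setInputCloud(cloud);
  octree.addPointsFromInputCloud();

  const pcl::PointXYZ query(0.0f, 0.0f, 0.0f);
  const pcl::uindex_t points_in_voxel = octree.getVoxelDensityAtPoint(query);
  (void)points_in_voxel;
}
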
} // namespace octree
} // namespace pcl
+// needed since OctreePointCloud is not instantiated with template parameters used above
+#include <pcl/octree/impl/octree_pointcloud.hpp>
+
#define PCL_INSTANTIATE_OctreePointCloudSinglePoint(T) \
template class PCL_EXPORTS pcl::octree::OctreePointCloudSinglePoint<T>;
}
private:
- unsigned int point_counter_;
+ uindex_t point_counter_;
PointT point_sum_;
};
* \param pointIdx_arg
*/
void
- addPointIdx(const int pointIdx_arg) override
+ addPointIdx(const uindex_t pointIdx_arg) override
{
OctreeKey key;
- assert(pointIdx_arg < static_cast<int>(this->input_->size()));
+ assert(pointIdx_arg < this->input_->size());
const PointT& point = (*this->input_)[pointIdx_arg];
* \return "true" if voxel is found; "false" otherwise
*/
inline bool
- getVoxelCentroidAtPoint(const int& point_idx_arg, PointT& voxel_centroid_arg) const
+ getVoxelCentroidAtPoint(const index_t& point_idx_arg,
+ PointT& voxel_centroid_arg) const
{
// get centroid at point
return (this->getVoxelCentroidAtPoint((*this->input_)[point_idx_arg],
* elements
* \return number of occupied voxels
*/
- std::size_t
+ uindex_t
getVoxelCentroids(
typename OctreePointCloud<PointT, LeafContainerT, BranchContainerT>::
AlignedPointTVector& voxel_centroid_list_arg) const;
: public OctreePointCloud<PointT, LeafContainerT, BranchContainerT> {
public:
// public typedefs
- using IndicesPtr = shared_ptr<std::vector<int>>;
- using IndicesConstPtr = shared_ptr<const std::vector<int>>;
+ using IndicesPtr = shared_ptr<Indices>;
+ using IndicesConstPtr = shared_ptr<const Indices>;
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudPtr = typename PointCloud::Ptr;
* \return "true" if leaf node exist; "false" otherwise
*/
bool
- voxelSearch(const PointT& point, std::vector<int>& point_idx_data);
+ voxelSearch(const PointT& point, Indices& point_idx_data);
/** \brief Search for neighbors within a voxel at given point referenced by a point
* index
* \return "true" if leaf node exist; "false" otherwise
*/
bool
- voxelSearch(const int index, std::vector<int>& point_idx_data);
+ voxelSearch(uindex_t index, Indices& point_idx_data);
/** \brief Search for k-nearest neighbors at the query point.
* \param[in] cloud the point cloud data
* points (must be resized to \a k a priori!)
* \return number of neighbors found
*/
- inline int
+ inline uindex_t
nearestKSearch(const PointCloud& cloud,
- int index,
- int k,
- std::vector<int>& k_indices,
+ uindex_t index,
+ uindex_t k,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances)
{
return (nearestKSearch(cloud[index], k, k_indices, k_sqr_distances));
* points (must be resized to k a priori!)
* \return number of neighbors found
*/
- int
+ uindex_t
nearestKSearch(const PointT& p_q,
- int k,
- std::vector<int>& k_indices,
+ uindex_t k,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances);
/** \brief Search for k-nearest neighbors at query point
* points (must be resized to \a k a priori!)
* \return number of neighbors found
*/
- int
- nearestKSearch(int index,
- int k,
- std::vector<int>& k_indices,
+ uindex_t
+ nearestKSearch(uindex_t index,
+ uindex_t k,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances);
/** \brief Search for approx. nearest neighbor at the query point.
*/
inline void
approxNearestSearch(const PointCloud& cloud,
- int query_index,
- int& result_index,
+ uindex_t query_index,
+ index_t& result_index,
float& sqr_distance)
{
return (approxNearestSearch(cloud[query_index], result_index, sqr_distance));
* \param[out] sqr_distance the resultant squared distance to the neighboring point
*/
void
- approxNearestSearch(const PointT& p_q, int& result_index, float& sqr_distance);
+ approxNearestSearch(const PointT& p_q, index_t& result_index, float& sqr_distance);
/** \brief Search for approx. nearest neighbor at the query point.
* \param[in] query_index index representing the query point in the dataset given by
* \return number of neighbors found
*/
void
- approxNearestSearch(int query_index, int& result_index, float& sqr_distance);
+ approxNearestSearch(uindex_t query_index, index_t& result_index, float& sqr_distance);
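
// Usage sketch (illustrative only, not part of the patch): the approximate search now
// takes the query index as pcl::uindex_t while the result index stays signed
// (pcl::index_t), matching the signed/unsigned split used elsewhere in the API. The
// function name approxExample is hypothetical.
#include <pcl/octree/octree_search.h>
#include <pcl/point_types.h>

void
approxExample(pcl::octree::OctreePointCloudSearch<pcl::PointXYZ>& octree,
              const pcl::PointXYZ& query)
{
  pcl::index_t result_index = 0;
  float sqr_distance = 0.0f;
  octree.approxNearestSearch(query, result_index, sqr_distance);
}
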
/** \brief Search for all neighbors of query point that are within a given radius.
* \param[in] cloud the point cloud data
* \param[in] max_nn if given, bounds the maximum returned neighbors to this value
* \return number of neighbors found in radius
*/
- int
+ uindex_t
radiusSearch(const PointCloud& cloud,
- int index,
+ uindex_t index,
double radius,
- std::vector<int>& k_indices,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances,
- unsigned int max_nn = 0)
+ uindex_t max_nn = 0)
{
return (radiusSearch(cloud[index], radius, k_indices, k_sqr_distances, max_nn));
}
* \param[in] max_nn if given, bounds the maximum returned neighbors to this value
* \return number of neighbors found in radius
*/
- int
+ uindex_t
radiusSearch(const PointT& p_q,
const double radius,
- std::vector<int>& k_indices,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances,
- unsigned int max_nn = 0) const;
+ uindex_t max_nn = 0) const;
/** \brief Search for all neighbors of query point that are within a given radius.
* \param[in] index index representing the query point in the dataset given by \a
* \param[in] max_nn if given, bounds the maximum returned neighbors to this value
* \return number of neighbors found in radius
*/
- int
- radiusSearch(int index,
+ uindex_t
+ radiusSearch(uindex_t index,
const double radius,
- std::vector<int>& k_indices,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances,
- unsigned int max_nn = 0) const;
+ uindex_t max_nn = 0) const;
/** \brief Get a PointT vector of centers of all voxels that intersected by a ray
* (origin, direction).
* disable)
* \return number of intersected voxels
*/
- int
+ uindex_t
getIntersectedVoxelCenters(Eigen::Vector3f origin,
Eigen::Vector3f direction,
AlignedPointTVector& voxel_center_list,
- int max_voxel_count = 0) const;
+ uindex_t max_voxel_count = 0) const;
/** \brief Get indices of all voxels that are intersected by a ray (origin,
* direction).
* disable)
* \return number of intersected voxels
*/
- int
+ uindex_t
getIntersectedVoxelIndices(Eigen::Vector3f origin,
Eigen::Vector3f direction,
- std::vector<int>& k_indices,
- int max_voxel_count = 0) const;
+ Indices& k_indices,
+ uindex_t max_voxel_count = 0) const;
/** \brief Search for points within rectangular search area
* Points exactly on the edges of the search rectangle are included.
* \param[out] k_indices the resultant point indices
* \return number of points found within search area
*/
- int
+ uindex_t
boxSearch(const Eigen::Vector3f& min_pt,
const Eigen::Vector3f& max_pt,
- std::vector<int>& k_indices) const;
+ Indices& k_indices) const;
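
Editorial aside, not part of the patch: a minimal call-site sketch of the migrated search API above, assuming the usual pcl::octree::OctreePointCloudSearch setup. The resolution, cloud and query point are placeholders; only the nearestKSearch signature is taken from the declarations in this change.

#include <pcl/octree/octree_search.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

inline pcl::uindex_t
searchExample (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
               const pcl::PointXYZ& query_point)
{
  pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree (0.05);  // resolution is a placeholder
  octree.setInputCloud (cloud);
  octree.addPointsFromInputCloud ();

  pcl::Indices k_indices;              // was std::vector<int>
  std::vector<float> k_sqr_distances;
  // the neighbour count is now returned as pcl::uindex_t instead of int
  return octree.nearestKSearch (query_point, 10, k_indices, k_sqr_distances);
}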
protected:
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
* \param[in] point_idx index for a dataset point given by \a setInputCloud
* \param[in] point_distance distance of query point to voxel center
*/
- prioPointQueueEntry(unsigned int& point_idx, float point_distance)
+ prioPointQueueEntry(uindex_t point_idx, float point_distance)
: point_idx_(point_idx), point_distance_(point_distance)
{}
}
/** \brief Index representing a point in the dataset given by \a setInputCloud. */
- int point_idx_;
+ uindex_t point_idx_;
/** \brief Distance to query point. */
float point_distance_;
const double radiusSquared,
const BranchNode* node,
const OctreeKey& key,
- unsigned int tree_depth,
- std::vector<int>& k_indices,
+ uindex_t tree_depth,
+ Indices& k_indices,
std::vector<float>& k_sqr_distances,
- unsigned int max_nn) const;
+ uindex_t max_nn) const;
/** \brief Recursive search method that explores the octree and finds the K nearest
* neighbors
double
getKNearestNeighborRecursive(
const PointT& point,
- unsigned int K,
+ uindex_t K,
const BranchNode* node,
const OctreeKey& key,
- unsigned int tree_depth,
+ uindex_t tree_depth,
const double squared_search_radius,
std::vector<prioPointQueueEntry>& point_candidates) const;
approxNearestSearchRecursive(const PointT& point,
const BranchNode* node,
const OctreeKey& key,
- unsigned int tree_depth,
- int& result_index,
+ uindex_t tree_depth,
+ index_t& result_index,
float& sqr_distance);
/** \brief Recursively search the tree for all intersected leaf nodes and return a
* disable)
* \return number of voxels found
*/
- int
+ uindex_t
getIntersectedVoxelCentersRecursive(double min_x,
double min_y,
double min_z,
const OctreeNode* node,
const OctreeKey& key,
AlignedPointTVector& voxel_center_list,
- int max_voxel_count) const;
+ uindex_t max_voxel_count) const;
/** \brief Recursive search method that explores the octree and finds points within a
* rectangular search area
const Eigen::Vector3f& max_pt,
const BranchNode* node,
const OctreeKey& key,
- unsigned int tree_depth,
- std::vector<int>& k_indices) const;
+ uindex_t tree_depth,
+ Indices& k_indices) const;
/** \brief Recursively search the tree for all intersected leaf nodes and return a
* vector of indices. This algorithm is based on the paper An Efficient Parametric
* disable)
* \return number of voxels found
*/
- int
+ uindex_t
getIntersectedVoxelIndicesRecursive(double min_x,
double min_y,
double min_z,
unsigned char a,
const OctreeNode* node,
const OctreeKey& key,
- std::vector<int>& k_indices,
- int max_voxel_count) const;
+ Indices& k_indices,
+ uindex_t max_voxel_count) const;
/** \brief Initialize raytracing algorithm
* \param origin
// Instantiations of specific point types
-template class PCL_EXPORTS pcl::octree::OctreeBase<int>;
-template class PCL_EXPORTS pcl::octree::Octree2BufBase<int>;
+template class PCL_EXPORTS pcl::octree::OctreeBase<pcl::index_t>;
+template class PCL_EXPORTS pcl::octree::Octree2BufBase<pcl::index_t>;
template class PCL_EXPORTS
pcl::octree::OctreeBase<pcl::octree::OctreeContainerPointIndices,
"include/pcl/${SUBSYS_NAME}/visualization/scene.h"
"include/pcl/${SUBSYS_NAME}/visualization/viewport.h"
)
+
+# Code in subdirectory `visualization` may use deprecated declarations when using OpenGLv1
+# Add the GCC exclusive rules for -Werror only for OpenGLv1 compile to avoid build interruption
+if(VTK_RENDERING_BACKEND_OPENGL_VERSION VERSION_LESS 2)
+ if(CMAKE_COMPILER_IS_GNUCXX)
+ add_compile_options("-Wno-error=deprecated-declarations")
+ endif()
+endif()
+
set(LIB_NAME "pcl_${SUBSYS_NAME}")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
pcl::PCLPointCloud2::Ptr downsampled_cloud (new pcl::PCLPointCloud2 ());
//create destination for indices
- pcl::IndicesPtr downsampled_cloud_indices (new std::vector< int > ());
+ pcl::IndicesPtr downsampled_cloud_indices (new pcl::Indices ());
lod_filter_ptr_->filter (*downsampled_cloud_indices);
//extract the "random subset", size by setSampleSize
//indices to store the points for each bin
//these lists will be used to copy data to new point clouds and pass down recursively
- std::vector < std::vector<int> > indices;
+ std::vector < pcl::Indices > indices;
indices.resize (8);
this->sortOctantIndices (input_cloud, indices, node_metadata_->getVoxelCenter ());
pcl::PCLPointCloud2::Ptr downsampled_cloud ( new pcl::PCLPointCloud2 () );
//create destination for indices
- pcl::IndicesPtr downsampled_cloud_indices ( new std::vector< int > () );
+ pcl::IndicesPtr downsampled_cloud_indices ( new pcl::Indices () );
random_sampler.filter (*downsampled_cloud_indices);
//extract the "random subset", size by setSampleSize
PCL_DEBUG ("[pcl::outofcore::OutofcoreOctreeBaseNode::%s] Remaining points are %u\n",__FUNCTION__, remaining_points->width*remaining_points->height);
//subdivide remaining data by destination octant
- std::vector<std::vector<int> > indices;
+ std::vector<pcl::Indices> indices;
indices.resize (8);
this->sortOctantIndices (remaining_points, indices, node_metadata_->getVoxelCenter ());
//if already has 8 children, return
if (children_[idx] || (num_children_ == 8))
{
- PCL_ERROR ("[pcl::outofcore::OutofcoreOctreeBaseNode::createChild] Not allowed to create a 9th child of %s",this->node_metadata_->getMetadataFilename ().c_str ());
+ PCL_ERROR ("[pcl::outofcore::OutofcoreOctreeBaseNode::createChild] Not allowed to create a 9th child of %s\n",this->node_metadata_->getMetadataFilename ().c_str ());
return;
}
Eigen::Vector4f min_pt ( static_cast<float> ( min_bb[0] ), static_cast<float> ( min_bb[1] ), static_cast<float> ( min_bb[2] ), 1.0f);
Eigen::Vector4f max_pt ( static_cast<float> ( max_bb[0] ), static_cast<float> ( max_bb[1] ) , static_cast<float>( max_bb[2] ), 1.0f );
- std::vector<int> indices;
+ pcl::Indices indices;
pcl::getPointsInBox ( *tmp_cloud, min_pt, max_pt, indices );
PCL_DEBUG ( "[pcl::outofcore::OutofcoreOctreeBaseNode::%s] Points in box: %d\n", __FUNCTION__, indices.size () );
pcl::ExtractIndices<pcl::PCLPointCloud2> extractor;
extractor.setInputCloud (tmp_blob);
- pcl::IndicesPtr downsampled_cloud_indices (new std::vector<int> ());
+ pcl::IndicesPtr downsampled_cloud_indices (new pcl::Indices ());
random_sampler.filter (*downsampled_cloud_indices);
extractor.setIndices (downsampled_cloud_indices);
extractor.filter (*downsampled_points);
////////////////////////////////////////////////////////////////////////////////
template<typename ContainerT, typename PointT> void
- OutofcoreOctreeBaseNode<ContainerT, PointT>::sortOctantIndices (const pcl::PCLPointCloud2::Ptr &input_cloud, std::vector< std::vector<int> > &indices, const Eigen::Vector3d &mid_xyz)
+ OutofcoreOctreeBaseNode<ContainerT, PointT>::sortOctantIndices (const pcl::PCLPointCloud2::Ptr &input_cloud, std::vector< pcl::Indices > &indices, const Eigen::Vector3d &mid_xyz)
{
if (indices.size () < 8)
indices.resize (8);
if(!this->pointInBoundingBox (local_pt))
{
- PCL_ERROR ("pcl::outofcore::OutofcoreOctreeBaseNode::%s] Point %2.lf %.2lf %.2lf not in bounding box", __FUNCTION__, local_pt.x, local_pt.y, local_pt.z);
+ PCL_ERROR ("[pcl::outofcore::OutofcoreOctreeBaseNode::%s] Point %.2lf %.2lf %.2lf not in bounding box\n", __FUNCTION__, local_pt.x, local_pt.y, local_pt.z);
}
assert (this->pointInBoundingBox (local_pt) == true);
#pragma once
#include <mutex>
-#include <string>
#include <vector>
#include <pcl/outofcore/boost.h>
using PointCloud = pcl::PointCloud<PointT>;
- using IndicesPtr = shared_ptr<std::vector<int> >;
- using IndicesConstPtr = shared_ptr<const std::vector<int> >;
+ using IndicesPtr = shared_ptr<pcl::Indices>;
+ using IndicesConstPtr = shared_ptr<const pcl::Indices>;
using PointCloudPtr = typename PointCloud::Ptr;
using PointCloudConstPtr = typename PointCloud::ConstPtr;
*
* \param[in] input_cloud The cloud of points to be inserted into the out-of-core octree. Note if multiple PCLPointCloud2 objects are added to the tree, this assumes that they all have exactly the same fields.
* \param[in] skip_bb_check (default=false) whether to skip the bounding box check on insertion. Note the bounding box check is never skipped in the current implementation.
- * \return Number of poitns successfully copied from the point cloud to the octree
+ * \return Number of points successfully copied from the point cloud to the octree
*/
std::uint64_t
addPointCloud (pcl::PCLPointCloud2::Ptr &input_cloud, const bool skip_bb_check = false);
* This could be overloaded with a parallelized implementation
*/
void
- sortOctantIndices (const pcl::PCLPointCloud2::Ptr &input_cloud, std::vector< std::vector<int> > &indices, const Eigen::Vector3d &mid_xyz);
+ sortOctantIndices (const pcl::PCLPointCloud2::Ptr &input_cloud, std::vector< pcl::Indices > &indices, const Eigen::Vector3d &mid_xyz);
/** \brief Enlarges the shortest two sidelengths of the
* bounding box to a cubic shape; operation is done in
// C++
#include <mutex>
-#include <vector>
#include <string>
// Boost
// C++
#include <mutex>
#include <random>
-#include <vector>
#include <pcl/outofcore/boost.h>
#include <pcl/outofcore/octree_abstract_node_container.h>
#include <pcl/outofcore/boost.h>
#include <pcl/outofcore/cJSON.h>
-#include <pcl/common/eigen.h>
-
#include <pcl/outofcore/metadata.h>
//standard library
#pragma once
#include <pcl/outofcore/outofcore_iterator_base.h>
+
+#include <deque>
+
namespace pcl
{
namespace outofcore
<b>pcl_outofcore</b> provides an interface to construct and query
outofcore octrees via OutofcoreOctreeBase. The out of core octree
- can be used with any PCLPointCloud2 with point types containing ``x'',
- ``y'' and ``z'' fields. No internal checking is done to verify
+ can be used with any PCLPointCloud2 with point types containing x,
+ y and z fields. No internal checking is done to verify
this. On the other hand, point clouds do not need to be filtered for
NaN entries; the library will automatically ignore NaN points in the
insertion methods.
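
Editorial aside, not part of the patch: a minimal sketch of inserting a PCLPointCloud2 blob, using the addPointCloud() declaration touched by this change. The bounding box, resolution, on-disk path and coordinate-system string are illustrative assumptions.

#include <pcl/outofcore/outofcore.h>
#include <pcl/PCLPointCloud2.h>

inline void
insertBlob (pcl::PCLPointCloud2::Ptr cloud)   // cloud only needs x, y and z fields
{
  const Eigen::Vector3d min_bb (-10.0, -10.0, -10.0);
  const Eigen::Vector3d max_bb ( 10.0,  10.0,  10.0);
  // resolution, tree path and coordinate system are placeholder values
  pcl::outofcore::OutofcoreOctreeBase<> octree (min_bb, max_bb, 0.1, "tree/tree.oct_idx", "ECEF");
  // NaN entries in `cloud` are ignored automatically during insertion
  octree.addPointCloud (cloud, false);
}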
#include <pcl/console/print.h>
#include <pcl/pcl_macros.h>
-#include <pcl/common/io.h>
+#include <pcl/exceptions.h> // for PCL_THROW_EXCEPTION, PCLException
#include <iostream>
#include <fstream>
-#include <sstream>
#include <memory>
namespace pcl
else()
set(DEFAULT TRUE)
set(REASON)
- set(VTK_USE_FILE "${VTK_USE_FILE}" CACHE INTERNAL "VTK_USE_FILE")
- include("${VTK_USE_FILE}")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
set(srcs outofcore_viewer.cpp
#include <pcl/common/time.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <pcl/PCLPointCloud2.h>
#include <pcl/io/pcd_io.h>
#include <pcl/pcl_macros.h>
#include <boost/accumulators/statistics/max.hpp>
#include <boost/accumulators/statistics/mean.hpp>
#include <boost/accumulators/statistics/variance.hpp>
+#include <boost/algorithm/string/replace.hpp> // for replace_first
namespace ba = boost::accumulators;
boost::filesystem::path pcd_path (argv[file_arg_index]);
if (!boost::filesystem::exists (pcd_path))
{
- PCL_WARN ("File %s doesn't exist", pcd_path.string ().c_str ());
+ PCL_WARN ("File %s doesn't exist\n", pcd_path.string ().c_str ());
continue;
}
pcd_paths.push_back (pcd_path);
#cmakedefine HAVE_QHULL 1
-#cmakedefine HAVE_QHULL_2011 1
-
#cmakedefine HAVE_CUDA 1
#cmakedefine HAVE_ENSENSO 1
/* Version of OpenGL used by VTK as rendering backend */
#define VTK_RENDERING_BACKEND_OPENGL_VERSION ${VTK_RENDERING_BACKEND_OPENGL_VERSION}
+#cmakedefine HAVE_QVTK 1
+
else()
set(DEFAULT TRUE)
set(REASON)
- set(VTK_USE_FILE "${VTK_USE_FILE}" CACHE INTERNAL "VTK_USE_FILE")
- include("${VTK_USE_FILE}")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
endif()
// Ground plane estimation:
Eigen::VectorXf ground_coeffs;
ground_coeffs.resize(4);
- std::vector<int> clicked_points_indices;
+ pcl::Indices clicked_points_indices;
for (std::size_t i = 0; i < clicked_points_3d->size(); i++)
clicked_points_indices.push_back(i);
pcl::SampleConsensusModelPlane<PointT> model_plane(clicked_points_3d);
#include <pcl/point_types.h>
#include <pcl/sample_consensus/sac_model_plane.h>
#include <pcl/sample_consensus/ransac.h>
-#include <pcl/filters/extract_indices.h>
-#include <pcl/segmentation/extract_clusters.h>
#include <pcl/kdtree/kdtree.h>
-#include <pcl/filters/voxel_grid.h>
#include <pcl/people/person_cluster.h>
#include <pcl/people/head_based_subcluster.h>
#include <pcl/people/person_classifier.h>
#define PCL_PEOPLE_GROUND_BASED_PEOPLE_DETECTION_APP_HPP_
#include <pcl/people/ground_based_people_detection_app.h>
+#include <pcl/filters/extract_indices.h> // for ExtractIndices
+#include <pcl/segmentation/extract_clusters.h> // for EuclideanClusterExtraction
+#include <pcl/filters/voxel_grid.h> // for VoxelGrid
template <typename PointT>
pcl::people::GroundBasedPeopleDetectionApp<PointT>::GroundBasedPeopleDetectionApp ()
filter();
// Ground removal and update:
- pcl::IndicesPtr inliers(new std::vector<int>);
+ pcl::IndicesPtr inliers(new pcl::Indices);
typename pcl::SampleConsensusModelPlane<PointT>::Ptr ground_model (new pcl::SampleConsensusModelPlane<PointT> (cloud_filtered_));
ground_model->selectWithinDistance(ground_coeffs_transformed_, 2 * voxel_size_, *inliers);
no_ground_cloud_ = PointCloudPtr (new PointCloud);
if (!used_clusters[cluster]) // if this cluster has not been used yet
{
used_clusters[cluster] = true;
- for(std::vector<int>::const_iterator points_iterator = input_clusters[cluster].getIndices().indices.begin();
- points_iterator != input_clusters[cluster].getIndices().indices.end(); ++points_iterator)
+ for(const auto& cluster_idx : input_clusters[cluster].getIndices().indices)
{
- point_indices.indices.push_back(*points_iterator);
+ point_indices.indices.push_back(cluster_idx);
}
}
}
Eigen::Vector3f head_ground_coeffs = ground_coeffs_.head(3); // ground plane normal (precomputed for speed)
Eigen::Matrix3Xf maxima_projected(3,maxima_number); // matrix containing the projection of maxima onto the ground plane
Eigen::VectorXi subclusters_number_of_points(maxima_number); // subclusters number of points
- std::vector <std::vector <int> > sub_clusters_indices; // vector of vectors with the cluster indices for every maximum
+ std::vector <pcl::Indices> sub_clusters_indices; // vector of vectors with the cluster indices for every maximum
sub_clusters_indices.resize(maxima_number); // resize to number of maxima
// Project maxima on the ground plane:
}
// Associate cluster points to one of the maximum:
- for(std::vector<int>::const_iterator points_iterator = cluster.getIndices().indices.begin(); points_iterator != cluster.getIndices().indices.end(); ++points_iterator)
+ for(const auto& cluster_idx : cluster.getIndices().indices)
{
- PointT* current_point = &(*cloud_)[*points_iterator]; // current point cloud point
- Eigen::Vector3f p_current_eigen(current_point->x, current_point->y, current_point->z); // conversion to eigen
+ Eigen::Vector3f p_current_eigen((*cloud_)[cluster_idx].x, (*cloud_)[cluster_idx].y, (*cloud_)[cluster_idx].z); // conversion to eigen
float t = p_current_eigen.dot(head_ground_coeffs) / normalize_factor; // height from the ground
p_current_eigen -= head_ground_coeffs * t; // projection of the point on the groundplane
if (((p_current_eigen - maxima_projected.col(i)).norm()) < heads_minimum_distance_)
{
correspondence_detected = true;
- sub_clusters_indices[i].push_back(*points_iterator);
+ sub_clusters_indices[i].push_back(cluster_idx);
subclusters_number_of_points(i)++;
}
else
buckets_.resize(std::size_t((cluster.getMax()(1) - cluster.getMin()(1)) / bin_size_) + 1, 0);
buckets_cloud_indices_.resize(buckets_.size(), 0);
- for(std::vector<int>::const_iterator pit = cluster.getIndices().indices.begin(); pit != cluster.getIndices().indices.end(); ++pit)
+ for(const auto& cluster_idx : cluster.getIndices().indices)
{
- PointT* p = &(*cloud_)[*pit];
+ PointT* p = &(*cloud_)[cluster_idx];
int index;
if (!vertical_) // camera horizontal
index = int((p->x - cluster.getMin()(0)) / bin_size_);
if ((heightp * 60) > buckets_[index]) // compare the height of the new point with the existing one
{
buckets_[index] = heightp * 60; // maximum height
- buckets_cloud_indices_[index] = *pit; // point cloud index of the point with maximum height
+ buckets_cloud_indices_[index] = cluster_idx; // point cloud index of the point with maximum height
}
}
}
points_indices_.indices = indices.indices;
- for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); pit++)
+ for (const auto& index : (points_indices_.indices))
{
- PointT* p = &(*input_cloud)[*pit];
+ PointT* p = &(*input_cloud)[index];
min_x_ = std::min(p->x, min_x_);
max_x_ = std::max(p->x, max_x_);
if (!vertical_)
{
head_threshold_value = min_y_ + height_ / 8.0f; // head is supposed to be 1/8 of the human height
- for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); pit++)
+ for (const auto& index : (points_indices_.indices))
{
- PointT* p = &(*input_cloud)[*pit];
+ PointT* p = &(*input_cloud)[index];
if(p->y < head_threshold_value)
{
else
{
head_threshold_value = max_x_ - height_ / 8.0f; // head is supposed to be 1/8 of the human height
- for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); pit++)
+ for (const auto& index : (points_indices_.indices))
{
- PointT* p = &(*input_cloud)[*pit];
+ PointT* p = &(*input_cloud)[index];
if(p->x > head_threshold_value)
{
float min_z = c_z_;
float max_x = c_x_;
float max_z = c_z_;
- for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); ++pit)
+ for (const auto& index : (points_indices_.indices))
{
- PointT* p = &(*input_cloud)[*pit];
+ PointT* p = &(*input_cloud)[index];
min_x = std::min(p->x, min_x);
max_x = std::max(p->x, max_x);
float min_z = c_z_;
float max_y = c_y_;
float max_z = c_z_;
- for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); ++pit)
+ for (const auto& index : (points_indices_.indices))
{
- PointT* p = &(*input_cloud)[*pit];
+ PointT* p = &(*input_cloud)[index];
min_y = std::min(p->y, min_y);
max_y = std::max(p->y, max_y);
coeffs.values.push_back (0.5);
}
- std::stringstream bbox_name;
- bbox_name << "bbox_person_" << person_number;
- viewer.removeShape (bbox_name.str());
- viewer.addCube (coeffs, bbox_name.str());
- viewer.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, 0.0, 1.0, 0.0, bbox_name.str());
- viewer.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 2, bbox_name.str());
-
- // std::stringstream confid;
- // confid << person_confidence_;
- // PointT position;
- // position.x = tcenter_[0]- 0.2;
- // position.y = ttop_[1];
- // position.z = tcenter_[2];
- // viewer.addText3D(confid.str().substr(0, 4), position, 0.1);
+ const std::string bbox_name = "bbox_person_" + std::to_string(person_number);
+ viewer.removeShape (bbox_name);
+ viewer.addCube (coeffs, bbox_name);
+ viewer.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, 0.0, 1.0, 0.0, bbox_name);
+ viewer.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 2, bbox_name);
}
template <typename PointT>
"include/pcl/${SUBSYS_NAME}/dense_quantized_multi_mod_template.h"
"include/pcl/${SUBSYS_NAME}/sparse_quantized_multi_mod_template.h"
"include/pcl/${SUBSYS_NAME}/surface_normal_modality.h"
- "include/pcl/${SUBSYS_NAME}/linemod/line_rgbd.h"
+ "include/pcl/${SUBSYS_NAME}/line_rgbd.h"
"include/pcl/${SUBSYS_NAME}/implicit_shape_model.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/auxiliary.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/hypothesis.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/model_library.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/rigid_transform_space.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/obj_rec_ransac.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/orr_graph.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/orr_octree_zprojection.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/trimmed_icp.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/orr_octree.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/simple_octree.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/voxel_structure.h"
- "include/pcl/${SUBSYS_NAME}/ransac_based/bvh.h"
+ "include/pcl/${SUBSYS_NAME}/auxiliary.h"
+ "include/pcl/${SUBSYS_NAME}/hypothesis.h"
+ "include/pcl/${SUBSYS_NAME}/model_library.h"
+ "include/pcl/${SUBSYS_NAME}/rigid_transform_space.h"
+ "include/pcl/${SUBSYS_NAME}/obj_rec_ransac.h"
+ "include/pcl/${SUBSYS_NAME}/orr_graph.h"
+ "include/pcl/${SUBSYS_NAME}/orr_octree_zprojection.h"
+ "include/pcl/${SUBSYS_NAME}/trimmed_icp.h"
+ "include/pcl/${SUBSYS_NAME}/orr_octree.h"
+ "include/pcl/${SUBSYS_NAME}/simple_octree.h"
+ "include/pcl/${SUBSYS_NAME}/voxel_structure.h"
+ "include/pcl/${SUBSYS_NAME}/bvh.h"
)
set(ransac_based_incs
)
set(impl_incs
- "include/pcl/${SUBSYS_NAME}/impl/linemod/line_rgbd.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/ransac_based/simple_octree.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/ransac_based/voxel_structure.hpp"
+ "include/pcl/${SUBSYS_NAME}/impl/line_rgbd.hpp"
+ "include/pcl/${SUBSYS_NAME}/impl/simple_octree.hpp"
+ "include/pcl/${SUBSYS_NAME}/impl/voxel_structure.hpp"
"include/pcl/${SUBSYS_NAME}/impl/implicit_shape_model.hpp"
)
--- /dev/null
+# License
+
+PCL carries a custom (old) version of METSlib, which was added (after relicensing for compatibility with PCL) under the BSD 3-clause license. The details of the addition are:
+* author: Mirko Maischberger <mirko.maischberger@gmail.com>
+* commit: e4d6501af048215ce84b4ee436ff0e18dba2d30d
+* URL: https://projects.coin-or.org/metslib
+
+This has been modified to work with a system-installed METSlib as well.
+Though METSlib is available primarily under GPLv3, it is dual-licensed and also available under EPL-1.0 (a commercially friendly license).
+
+# TL;DR
+This implies the following:
+* While using PCL with the METSlib shipped with it, everything is licensed under BSD
+* While using PCL with system installed METSlib, PCL adheres to the API available under the EPL-1.0 license
+* PCL doesn't use the system-installed METSlib released under GPLv3 for testing/reference/linking
std::tr1::variate_generator<random_generator&,
std::tr1::uniform_int<std::size_t> >gen(rng, unigen);
#endif
- std::random_shuffle(p.pi_m.begin(), p.pi_m.end(), gen);
+ std::shuffle(p.pi_m.begin(), p.pi_m.end(), gen);
p.update_cost();
}
--- /dev/null
+#include <pcl/recognition/ransac_based/auxiliary.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/auxiliary.h> instead")
#if defined __GNUC__
# pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#include <boost/graph/graph_traits.hpp>
--- /dev/null
+#include <pcl/recognition/ransac_based/bvh.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/bvh.h> instead")
#pragma once
#include <pcl/recognition/cg/correspondence_grouping.h>
-#include <pcl/recognition/boost.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
const int width = input_->width;
const int height = input_->height;
- color_gradients_.points.resize (width*height);
+ color_gradients_.resize (width*height);
color_gradients_.width = width;
color_gradients_.height = height;
const int width = cloud->width;
const int height = cloud->height;
- color_gradients_.points.resize (width*height);
+ color_gradients_.resize (width*height);
color_gradients_.width = width;
color_gradients_.height = height;
const int width = cloud->width;
const int height = cloud->height;
- color_gradients_.points.resize (width*height);
+ color_gradients_.resize (width*height);
color_gradients_.width = width;
color_gradients_.height = height;
#pragma once
-#include <vector>
#include <pcl/pcl_macros.h>
#include <pcl/recognition/quantized_map.h>
#include <pcl/recognition/mask_map.h>
{
cloud_out.width = max_col - min_col + 1;
cloud_out.height = max_row - min_row + 1;
- cloud_out.points.resize (cloud_out.width * cloud_out.height);
+ cloud_out.resize (cloud_out.width * cloud_out.height);
for (unsigned int u = 0; u < cloud_out.width; u++)
{
for (unsigned int v = 0; v < cloud_out.height; v++)
#include <pcl/pcl_macros.h>
#include <pcl/recognition/hv/hypotheses_verification.h>
-#include <pcl/common/common.h>
#include <memory>
#include <pcl/pcl_macros.h>
#include <pcl/recognition/hv/hypotheses_verification.h>
-#include <pcl/common/common.h>
#include <pcl/recognition/3rdparty/metslib/mets.hh>
#include <pcl/features/normal_3d.h>
#pragma once
-#include <pcl/recognition/boost.h>
#include <pcl/pcl_macros.h>
-#include <pcl/common/common.h>
#include <pcl/recognition/hv/hypotheses_verification.h>
#include <boost/graph/adjacency_list.hpp>
#include <pcl/pcl_macros.h>
#include "pcl/recognition/hv/occlusion_reasoning.h"
#include "pcl/recognition/impl/hv/occlusion_reasoning.hpp"
-#include <pcl/common/common.h>
#include <pcl/search/kdtree.h>
#include <pcl/filters/voxel_grid.h>
//we need to reason about occlusions before setting the model
if (scene_cloud_ == nullptr)
{
- PCL_ERROR("setSceneCloud should be called before adding the model if reasoning about occlusions...");
+ PCL_ERROR("setSceneCloud should be called before adding the model if reasoning about occlusions...\n");
}
if (!occlusion_cloud_->isOrganized ())
typename pcl::PointCloud<ModelT>::Ptr filtered (new pcl::PointCloud<ModelT> ());
typename pcl::occlusion_reasoning::ZBuffering<ModelT, SceneT> zbuffer_self_occlusion (75, 75, 1.f);
zbuffer_self_occlusion.computeDepthMap (models[i], true);
- std::vector<int> indices;
+ pcl::Indices indices;
zbuffer_self_occlusion.filter (models[i], indices, 0.005f);
pcl::copyPointCloud (*models[i], indices, *filtered);
#pragma once
-#include <pcl/common/common.h>
-#include <pcl/common/transforms.h>
#include <pcl/common/io.h>
namespace pcl
computeDepthMap (typename pcl::PointCloud<SceneT>::ConstPtr & scene, bool compute_focal = false, bool smooth = false, int wsize = 3);
void
filter (typename pcl::PointCloud<ModelT>::ConstPtr & model, typename pcl::PointCloud<ModelT>::Ptr & filtered, float thres = 0.01);
- void filter (typename pcl::PointCloud<ModelT>::ConstPtr & model, std::vector<int> & indices, float thres = 0.01);
+ void filter (typename pcl::PointCloud<ModelT>::ConstPtr & model, pcl::Indices & indices, float thres = 0.01);
};
template<typename ModelT, typename SceneT> typename pcl::PointCloud<ModelT>::Ptr
float cy = (static_cast<float> (organized_cloud->height) / 2.f - 0.5f);
typename pcl::PointCloud<ModelT>::Ptr filtered (new pcl::PointCloud<ModelT> ());
- std::vector<int> indices_to_keep;
+ pcl::Indices indices_to_keep;
indices_to_keep.resize (to_be_filtered->size ());
int keep = 0;
--- /dev/null
+#include <pcl/recognition/ransac_based/hypothesis.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/hypothesis.h> instead")
#ifndef PCL_RECOGNITION_HOUGH_3D_IMPL_H_
#define PCL_RECOGNITION_HOUGH_3D_IMPL_H_
+#include <pcl/common/io.h> // for copyPointCloud
#include <pcl/recognition/cg/hough_3d.h>
#include <pcl/registration/correspondence_types.h>
#include <pcl/registration/correspondence_rejection_sample_consensus.h>
std::vector<int> explained_indices;
std::vector<int> outliers;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
for (std::size_t i = 0; i < recog_model->cloud_->size (); i++)
#define PCL_RECOGNITION_IMPL_HV_GO_HPP_
#include <pcl/recognition/hv/hv_go.h>
+#include <pcl/common/common.h> // for getMinMax3D
#include <pcl/common/time.h>
#include <pcl/point_types.h>
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
int size = static_cast<int> (cloud.size ());
setPreviousBadInfo (bad_info);
int n_active_hyp = 0;
- for(const bool &i : active) {
+ for(const bool i : active) {
if(i)
n_active_hyp++;
}
std::vector<float> outliers_weight;
std::vector<float> explained_indices_distances;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
std::map<int, std::shared_ptr<std::vector<std::pair<int, float>>>> model_explains_scene_points; //which point i from the scene is explained by points j_k with dist d_k from the model
if (outliers_weight.empty ())
recog_model->outliers_weight_ = 1.f;
- pcl::IndicesPtr indices_scene (new std::vector<int>);
+ pcl::IndicesPtr indices_scene (new pcl::Indices);
//go through the map and keep the closest model point in case that several model points explain a scene point
int p = 0;
{
float rn_sqr = radius_neighborhood_GO_ * radius_neighborhood_GO_;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
std::vector < std::pair<int, int> > neighborhood_indices; //first is indices to scene point and second is indices to explained_ scene points
- for (int i = 0; i < static_cast<int> (recog_model->explained_.size ()); i++)
+ for (pcl::index_t i = 0; i < static_cast<pcl::index_t> (recog_model->explained_.size ()); i++)
{
if (scene_downsampled_tree_->radiusSearch ((*scene_cloud_downsampled_)[recog_model->explained_[i]], radius_neighborhood_GO_, nn_indices,
nn_distances, std::numeric_limits<int>::max ()))
std::vector<int> explained_indices;
std::vector<int> outliers;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
for (std::size_t i = 0; i < recog_model->cloud_->size (); i++)
pcl::occlusion_reasoning::ZBuffering<ModelT, SceneT>::filter (typename pcl::PointCloud<ModelT>::ConstPtr & model,
typename pcl::PointCloud<ModelT>::Ptr & filtered, float thres)
{
- std::vector<int> indices_to_keep;
+ pcl::Indices indices_to_keep;
filter(model, indices_to_keep, thres);
pcl::copyPointCloud (*model, indices_to_keep, *filtered);
}
///////////////////////////////////////////////////////////////////////////////////////////
template<typename ModelT, typename SceneT> void
pcl::occlusion_reasoning::ZBuffering<ModelT, SceneT>::filter (typename pcl::PointCloud<ModelT>::ConstPtr & model,
- std::vector<int> & indices_to_keep, float thres)
+ pcl::Indices & indices_to_keep, float thres)
{
float cx, cy;
#define PCL_IMPLICIT_SHAPE_MODEL_HPP_
#include "../implicit_shape_model.h"
+#include <pcl/filters/voxel_grid.h> // for VoxelGrid
+#include <pcl/filters/extract_indices.h> // for ExtractIndices
#include <pcl/memory.h> // for dynamic_pointer_cast
--- /dev/null
+#include <pcl/recognition/impl/linemod/line_rgbd.hpp>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/impl/linemod/line_rgbd.hpp> instead")
#ifndef SIMPLE_OCTREE_HPP_
#define SIMPLE_OCTREE_HPP_
-#include <algorithm>
#include <cmath>
--- /dev/null
+#include <pcl/recognition/impl/ransac_based/simple_octree.hpp>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/impl/ransac_based/simple_octree.hpp> instead")
--- /dev/null
+#include <pcl/recognition/impl/ransac_based/voxel_structure.hpp>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/impl/ransac_based/voxel_structure.hpp> instead")
#include <vector>
#include <fstream>
-#include <limits>
#include <Eigen/src/Core/Matrix.h>
#include <pcl/memory.h>
-#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
#include <pcl/point_representation.h>
#include <pcl/features/feature.h>
#include <pcl/features/spin_image.h>
-#include <pcl/filters/voxel_grid.h>
-#include <pcl/filters/extract_indices.h>
-#include <pcl/search/search.h>
-#include <pcl/kdtree/kdtree.h>
+#include <pcl/kdtree/kdtree_flann.h> // for KdTreeFLANN
namespace pcl
{
pcl::KdTreeFLANN<pcl::InterestPoint>::Ptr tree_;
/** \brief Stores neighbours indices. */
- std::vector<int> k_ind_;
+ pcl::Indices k_ind_;
/** \brief Stores square distances to the corresponding neighbours. */
std::vector<float> k_sqr_dist_;
--- /dev/null
+#include <pcl/recognition/linemod/line_rgbd.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/linemod/line_rgbd.h> instead")
return (data_.data());
}
- PCL_DEPRECATED(1, 12, "Use new version diff getDifferenceMask(mask0, mask1)")
- static void
- getDifferenceMask(const MaskMap& mask0, const MaskMap& mask1, MaskMap& diff_mask);
-
PCL_NODISCARD
static MaskMap
getDifferenceMask(const MaskMap& mask0, const MaskMap& mask1);
--- /dev/null
+#include <pcl/recognition/ransac_based/model_library.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/model_library.h> instead")
--- /dev/null
+#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/obj_rec_ransac.h> instead")
--- /dev/null
+#include <pcl/recognition/ransac_based/orr_graph.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/orr_graph.h> instead")
--- /dev/null
+#include <pcl/recognition/ransac_based/orr_octree.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/orr_octree.h> instead")
--- /dev/null
+#include <pcl/recognition/ransac_based/orr_octree_zprojection.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/orr_octree_zprojection.h> instead")
#pragma once
-#include <cmath>
-#include <cstdlib>
-#include <pcl/common/eigen.h>
+#include <Eigen/Core> // for Matrix
+#include <Eigen/Geometry> // for AngleAxis
#include <pcl/point_types.h>
#define AUX_PI_FLOAT 3.14159265358979323846f
#include <pcl/recognition/ransac_based/orr_graph.h>
#include <pcl/recognition/ransac_based/auxiliary.h>
#include <pcl/recognition/ransac_based/bvh.h>
-#include <pcl/registration/transformation_estimation_svd.h>
-#include <pcl/correspondence.h>
#include <pcl/pcl_exports.h>
#include <pcl/point_cloud.h>
#include <cmath>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/pcl_exports.h>
-#include <cstdlib>
-#include <ctime>
#include <vector>
#include <list>
#include <set>
// Some variables for the closest point search
pcl::PointXYZ transformed_source_point;
- std::vector<int> target_index (1);
+ pcl::Indices target_index (1);
std::vector<float> sqr_dist_to_target (1);
float old_energy, energy = std::numeric_limits<float>::max ();
#pragma once
-#include <cstdlib>
-
namespace pcl
{
namespace recognition
--- /dev/null
+#include <pcl/recognition/ransac_based/rigid_transform_space.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/rigid_transform_space.h> instead")
--- /dev/null
+#include <pcl/recognition/ransac_based/simple_octree.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/simple_octree.h> instead")
--- /dev/null
+#include <pcl/recognition/ransac_based/trimmed_icp.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/trimmed_icp.h> instead")
--- /dev/null
+#include <pcl/recognition/ransac_based/voxel_structure.h>
+PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/voxel_structure.h> instead")
height_ = height;
}
-//////////////////////////////////////////////////////////////////////////////////////////////
-void
-pcl::MaskMap::getDifferenceMask(const MaskMap& mask0,
- const MaskMap& mask1,
- MaskMap& diff_mask)
-{
- diff_mask = getDifferenceMask(mask0, mask1);
-}
-
//////////////////////////////////////////////////////////////////////////////////////////////
pcl::MaskMap
pcl::MaskMap::getDifferenceMask(const MaskMap& mask0, const MaskMap& mask1)
#include <pcl/common/common.h>
#include <pcl/common/random.h>
#include <cmath>
+#include <ctime> // for time
#include <algorithm>
#include <list>
#pragma once
+#include <pcl/pcl_macros.h>
#if defined __GNUC__
-# pragma GCC system_header
+#pragma GCC system_header
#endif
-#include <pcl/registration/eigen.h>
+#include <unsupported/Eigen/Polynomials> // for PolynomialSolver, PolynomialSolverBase
-namespace Eigen
-{
- template< typename _Scalar >
- class PolynomialSolver<_Scalar,2> : public PolynomialSolverBase<_Scalar,2>
+namespace Eigen {
+template <typename _Scalar>
+class PolynomialSolver<_Scalar, 2> : public PolynomialSolverBase<_Scalar, 2> {
+public:
+ using PS_Base = PolynomialSolverBase<_Scalar, 2>;
+ EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES(PS_Base)
+
+public:
+ virtual ~PolynomialSolver() {}
+
+ template <typename OtherPolynomial>
+ inline PolynomialSolver(const OtherPolynomial& poly, bool& hasRealRoot)
{
- public:
- using PS_Base = PolynomialSolverBase<_Scalar,2>;
- EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base )
-
- public:
-
- virtual ~PolynomialSolver () {}
-
- template< typename OtherPolynomial >
- inline PolynomialSolver( const OtherPolynomial& poly, bool& hasRealRoot )
- {
- compute( poly, hasRealRoot );
- }
-
- /** Computes the complex roots of a new polynomial. */
- template< typename OtherPolynomial >
- void compute( const OtherPolynomial& poly, bool& hasRealRoot)
- {
- const Scalar ZERO(0);
- Scalar a2(2 * poly[2]);
- assert( ZERO != poly[poly.size()-1] );
- Scalar discriminant ((poly[1] * poly[1]) - (4 * poly[0] * poly[2]));
- if (ZERO < discriminant)
- {
- Scalar discriminant_root (std::sqrt (discriminant));
- m_roots[0] = (-poly[1] - discriminant_root) / (a2) ;
- m_roots[1] = (-poly[1] + discriminant_root) / (a2) ;
- hasRealRoot = true;
- }
- else {
- if (ZERO == discriminant)
- {
- m_roots.resize (1);
- m_roots[0] = -poly[1] / a2;
- hasRealRoot = true;
- }
- else
- {
- Scalar discriminant_root (std::sqrt (-discriminant));
- m_roots[0] = RootType (-poly[1] / a2, -discriminant_root / a2);
- m_roots[1] = RootType (-poly[1] / a2, discriminant_root / a2);
- hasRealRoot = false;
- }
- }
+ compute(poly, hasRealRoot);
+ }
+
+ /** Computes the complex roots of a new polynomial. */
+ template <typename OtherPolynomial>
+ void
+ compute(const OtherPolynomial& poly, bool& hasRealRoot)
+ {
+ const Scalar ZERO(0);
+ Scalar a2(2 * poly[2]);
+ assert(ZERO != poly[poly.size() - 1]);
+ Scalar discriminant((poly[1] * poly[1]) - (4 * poly[0] * poly[2]));
+ if (ZERO < discriminant) {
+ Scalar discriminant_root(std::sqrt(discriminant));
+ m_roots[0] = (-poly[1] - discriminant_root) / (a2);
+ m_roots[1] = (-poly[1] + discriminant_root) / (a2);
+ hasRealRoot = true;
+ }
+ else {
+ if (ZERO == discriminant) {
+ m_roots.resize(1);
+ m_roots[0] = -poly[1] / a2;
+ hasRealRoot = true;
}
-
- template< typename OtherPolynomial >
- void compute( const OtherPolynomial& poly)
- {
- bool hasRealRoot;
- compute(poly, hasRealRoot);
+ else {
+ Scalar discriminant_root(std::sqrt(-discriminant));
+ m_roots[0] = RootType(-poly[1] / a2, -discriminant_root / a2);
+ m_roots[1] = RootType(-poly[1] / a2, discriminant_root / a2);
+ hasRealRoot = false;
}
+ }
+ }
- protected:
- using PS_Base::m_roots;
- };
-}
+ template <typename OtherPolynomial>
+ void
+ compute(const OtherPolynomial& poly)
+ {
+ bool hasRealRoot;
+ compute(poly, hasRealRoot);
+ }
+
+protected:
+ using PS_Base::m_roots;
+};
+} // namespace Eigen
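
Editorial aside, not part of the patch: a small sketch of what the PolynomialSolver specialization above computes, with made-up coefficients; it assumes this header is already included.

inline void
quadraticRootsExample ()
{
  // Roots of 2 - 3x + x^2 (coefficients ordered c0, c1, c2) are 1 and 2, both real.
  Eigen::Matrix<double, 3, 1> coefficients;
  coefficients << 2.0, -3.0, 1.0;
  bool has_real_roots = false;
  Eigen::PolynomialSolver<double, 2> solver (coefficients, has_real_roots);
  // has_real_roots == true; solver.roots()[0] and solver.roots()[1] are 1 and 2
  // (complex values with zero imaginary part)
}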
namespace BFGSSpace {
- enum Status {
- NegativeGradientEpsilon = -3,
- NotStarted = -2,
- Running = -1,
- Success = 0,
- NoProgress = 1
- };
+enum Status {
+ NegativeGradientEpsilon = -3,
+ NotStarted = -2,
+ Running = -1,
+ Success = 0,
+ NoProgress = 1
+};
}
-template<typename _Scalar, int NX=Eigen::Dynamic>
-struct BFGSDummyFunctor
-{
+template <typename _Scalar, int NX = Eigen::Dynamic>
+struct BFGSDummyFunctor {
using Scalar = _Scalar;
enum { InputsAtCompileTime = NX };
- using VectorType = Eigen::Matrix<Scalar,InputsAtCompileTime,1>;
+ using VectorType = Eigen::Matrix<Scalar, InputsAtCompileTime, 1>;
const int m_inputs;
BFGSDummyFunctor(int inputs) : m_inputs(inputs) {}
virtual ~BFGSDummyFunctor() {}
- int inputs() const { return m_inputs; }
+ int
+ inputs() const
+ {
+ return m_inputs;
+ }
- virtual double operator() (const VectorType &x) = 0;
- virtual void df(const VectorType &x, VectorType &df) = 0;
- virtual void fdf(const VectorType &x, Scalar &f, VectorType &df) = 0;
- virtual BFGSSpace::Status checkGradient(const VectorType& g) { return BFGSSpace::NotStarted; };
+ virtual double
+ operator()(const VectorType& x) = 0;
+ virtual void
+ df(const VectorType& x, VectorType& df) = 0;
+ virtual void
+ fdf(const VectorType& x, Scalar& f, VectorType& df) = 0;
+ virtual BFGSSpace::Status
+ checkGradient(const VectorType& g)
+ {
+ return BFGSSpace::NotStarted;
+ };
};
/**
- * BFGS stands for Broyden–Fletcher–Goldfarb–Shanno (BFGS) method for solving
- * unconstrained nonlinear optimization problems.
+ * BFGS stands for Broyden–Fletcher–Goldfarb–Shanno (BFGS) method for solving
+ * unconstrained nonlinear optimization problems.
* For further details please visit: http://en.wikipedia.org/wiki/BFGS_method
* The method provided here is very similar to the one provided by GSL.
* It reproduces Fletcher's original algorithm in Practical Methods of Optimization
- * algorithms : 2.6.2 and 2.6.4 and uses the same politics in GSL with cubic
- * interpolation whenever it is possible else falls to quadratic interpolation for
+ * algorithms : 2.6.2 and 2.6.4 and uses the same policy as GSL: cubic
+ * interpolation whenever possible, otherwise falling back to quadratic interpolation for the
* alpha parameter.
*/
-template<typename FunctorType>
-class BFGS
-{
+template <typename FunctorType>
+class BFGS {
public:
using Scalar = typename FunctorType::Scalar;
using FVectorType = typename FunctorType::VectorType;
- BFGS(FunctorType &_functor)
- : pnorm(0), g0norm(0), iter(-1), functor(_functor) { }
+ BFGS(FunctorType& _functor) : pnorm(0), g0norm(0), iter(-1), functor(_functor) {}
using Index = Eigen::DenseIndex;
struct Parameters {
Parameters()
: max_iters(400)
- , bracket_iters(100)
- , section_iters(100)
- , rho(0.01)
- , sigma(0.01)
- , tau1(9)
- , tau2(0.05)
- , tau3(0.5)
- , step_size(1)
- , order(3) {}
- Index max_iters; // maximum number of function evaluation
+ , bracket_iters(100)
+ , section_iters(100)
+ , rho(0.01)
+ , sigma(0.01)
+ , tau1(9)
+ , tau2(0.05)
+ , tau3(0.5)
+ , step_size(1)
+ , order(3)
+ {}
+ Index max_iters; // maximum number of function evaluations
Index bracket_iters;
Index section_iters;
Scalar rho;
Index order;
};
- BFGSSpace::Status minimize(FVectorType &x);
- BFGSSpace::Status minimizeInit(FVectorType &x);
- BFGSSpace::Status minimizeOneStep(FVectorType &x);
- BFGSSpace::Status testGradient();
+ BFGSSpace::Status
+ minimize(FVectorType& x);
+ BFGSSpace::Status
+ minimizeInit(FVectorType& x);
+ BFGSSpace::Status
+ minimizeOneStep(FVectorType& x);
+ BFGSSpace::Status
+ testGradient();
PCL_DEPRECATED(1, 13, "Use `testGradient()` instead")
BFGSSpace::Status testGradient(Scalar) { return testGradient(); }
- void resetParameters(void) { parameters = Parameters(); }
-
+ void
+ resetParameters(void)
+ {
+ parameters = Parameters();
+ }
+
Parameters parameters;
Scalar f;
FVectorType gradient;
+
private:
-
- BFGS& operator=(const BFGS&);
- BFGSSpace::Status lineSearch (Scalar rho, Scalar sigma,
- Scalar tau1, Scalar tau2, Scalar tau3,
- int order, Scalar alpha1, Scalar &alpha_new);
- Scalar interpolate (Scalar a, Scalar fa, Scalar fpa,
- Scalar b, Scalar fb, Scalar fpb, Scalar xmin, Scalar xmax,
- int order);
- void checkExtremum (const Eigen::Matrix<Scalar, 4, 1>& coefficients, Scalar x, Scalar& xmin, Scalar& fmin);
- void moveTo (Scalar alpha);
- Scalar slope ();
- Scalar applyF (Scalar alpha);
- Scalar applyDF (Scalar alpha);
- void applyFDF (Scalar alpha, Scalar &f, Scalar &df);
- void updatePosition (Scalar alpha, FVectorType& x, Scalar& f, FVectorType& g);
- void changeDirection ();
-
+ BFGS&
+ operator=(const BFGS&);
+ BFGSSpace::Status
+ lineSearch(Scalar rho,
+ Scalar sigma,
+ Scalar tau1,
+ Scalar tau2,
+ Scalar tau3,
+ int order,
+ Scalar alpha1,
+ Scalar& alpha_new);
+ Scalar
+ interpolate(Scalar a,
+ Scalar fa,
+ Scalar fpa,
+ Scalar b,
+ Scalar fb,
+ Scalar fpb,
+ Scalar xmin,
+ Scalar xmax,
+ int order);
+ void
+ checkExtremum(const Eigen::Matrix<Scalar, 4, 1>& coefficients,
+ Scalar x,
+ Scalar& xmin,
+ Scalar& fmin);
+ void
+ moveTo(Scalar alpha);
+ Scalar
+ slope();
+ Scalar
+ applyF(Scalar alpha);
+ Scalar
+ applyDF(Scalar alpha);
+ void
+ applyFDF(Scalar alpha, Scalar& f, Scalar& df);
+ void
+ updatePosition(Scalar alpha, FVectorType& x, Scalar& f, FVectorType& g);
+ void
+ changeDirection();
+
Scalar delta_f, fp0;
FVectorType x0, dx0, dg0, g0, dx, p;
Scalar pnorm, g0norm;
Scalar df_alpha;
FVectorType x_alpha;
FVectorType g_alpha;
-
+
// cache "keys"
Scalar f_cache_key;
Scalar df_cache_key;
Scalar g_cache_key;
Index iter;
- FunctorType &functor;
+ FunctorType& functor;
};
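
Editorial aside, not part of the patch: a hypothetical sketch of the functor/solver interface declared above. The quadratic functor, its name and the starting point are invented for illustration, and this header is assumed to be included.

// Minimize f(x) = (x - 3)^2 through the BFGSDummyFunctor/BFGS interface.
struct QuadraticFunctor : BFGSDummyFunctor<double, 1> {
  QuadraticFunctor () : BFGSDummyFunctor<double, 1> (1) {}
  double operator() (const VectorType& x) override { return (x[0] - 3.0) * (x[0] - 3.0); }
  void df (const VectorType& x, VectorType& grad) override { grad[0] = 2.0 * (x[0] - 3.0); }
  void fdf (const VectorType& x, double& f, VectorType& grad) override { f = (*this)(x); df (x, grad); }
};

inline void
bfgsExample ()
{
  QuadraticFunctor functor;
  BFGS<QuadraticFunctor> solver (functor);
  QuadraticFunctor::VectorType x;
  x[0] = 0.0;
  solver.minimize (x);   // x[0] moves towards the minimizer 3.0
}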
-
-template<typename FunctorType> void
-BFGS<FunctorType>::checkExtremum(const Eigen::Matrix<Scalar, 4, 1>& coefficients, Scalar x, Scalar& xmin, Scalar& fmin)
+template <typename FunctorType>
+void
+BFGS<FunctorType>::checkExtremum(const Eigen::Matrix<Scalar, 4, 1>& coefficients,
+ Scalar x,
+ Scalar& xmin,
+ Scalar& fmin)
{
Scalar y = Eigen::poly_eval(coefficients, x);
- if(y < fmin) { xmin = x; fmin = y; }
+ if (y < fmin) {
+ xmin = x;
+ fmin = y;
+ }
}
-template<typename FunctorType> void
+template <typename FunctorType>
+void
BFGS<FunctorType>::moveTo(Scalar alpha)
{
x_alpha = x0 + alpha * p;
x_cache_key = alpha;
}
-template<typename FunctorType> typename BFGS<FunctorType>::Scalar
+template <typename FunctorType>
+typename BFGS<FunctorType>::Scalar
BFGS<FunctorType>::slope()
{
- return (g_alpha.dot (p));
+ return (g_alpha.dot(p));
}
-template<typename FunctorType> typename BFGS<FunctorType>::Scalar
+template <typename FunctorType>
+typename BFGS<FunctorType>::Scalar
BFGS<FunctorType>::applyF(Scalar alpha)
{
- if (alpha == f_cache_key) return f_alpha;
- moveTo (alpha);
- f_alpha = functor (x_alpha);
+ if (alpha == f_cache_key)
+ return f_alpha;
+ moveTo(alpha);
+ f_alpha = functor(x_alpha);
f_cache_key = alpha;
return (f_alpha);
}
-template<typename FunctorType> typename BFGS<FunctorType>::Scalar
+template <typename FunctorType>
+typename BFGS<FunctorType>::Scalar
BFGS<FunctorType>::applyDF(Scalar alpha)
{
- if (alpha == df_cache_key) return df_alpha;
- moveTo (alpha);
- if(alpha != g_cache_key)
- {
- functor.df (x_alpha, g_alpha);
+ if (alpha == df_cache_key)
+ return df_alpha;
+ moveTo(alpha);
+ if (alpha != g_cache_key) {
+ functor.df(x_alpha, g_alpha);
g_cache_key = alpha;
}
- df_alpha = slope ();
+ df_alpha = slope();
df_cache_key = alpha;
return (df_alpha);
}
-template<typename FunctorType> void
+template <typename FunctorType>
+void
BFGS<FunctorType>::applyFDF(Scalar alpha, Scalar& f, Scalar& df)
{
- if(alpha == f_cache_key && alpha == df_cache_key)
- {
+ if (alpha == f_cache_key && alpha == df_cache_key) {
f = f_alpha;
df = df_alpha;
return;
}
- if(alpha == f_cache_key || alpha == df_cache_key)
- {
- f = applyF (alpha);
- df = applyDF (alpha);
+ if (alpha == f_cache_key || alpha == df_cache_key) {
+ f = applyF(alpha);
+ df = applyDF(alpha);
return;
}
- moveTo (alpha);
- functor.fdf (x_alpha, f_alpha, g_alpha);
+ moveTo(alpha);
+ functor.fdf(x_alpha, f_alpha, g_alpha);
f_cache_key = alpha;
g_cache_key = alpha;
- df_alpha = slope ();
+ df_alpha = slope();
df_cache_key = alpha;
f = f_alpha;
df = df_alpha;
}
-template<typename FunctorType> void
-BFGS<FunctorType>::updatePosition (Scalar alpha, FVectorType &x, Scalar &f, FVectorType &g)
+template <typename FunctorType>
+void
+BFGS<FunctorType>::updatePosition(Scalar alpha,
+ FVectorType& x,
+ Scalar& f,
+ FVectorType& g)
{
- {
- Scalar f_alpha, df_alpha;
- applyFDF (alpha, f_alpha, df_alpha);
- } ;
+ {
+ Scalar f_alpha, df_alpha;
+ applyFDF(alpha, f_alpha, df_alpha);
+ };
f = f_alpha;
x = x_alpha;
g = g_alpha;
-}
+}
-template<typename FunctorType> void
-BFGS<FunctorType>::changeDirection ()
+template <typename FunctorType>
+void
+BFGS<FunctorType>::changeDirection()
{
x_alpha = x0;
x_cache_key = 0.0;
f_cache_key = 0.0;
g_alpha = g0;
g_cache_key = 0.0;
- df_alpha = slope ();
+ df_alpha = slope();
df_cache_key = 0.0;
}
-template<typename FunctorType> BFGSSpace::Status
-BFGS<FunctorType>::minimize(FVectorType &x)
+template <typename FunctorType>
+BFGSSpace::Status
+BFGS<FunctorType>::minimize(FVectorType& x)
{
BFGSSpace::Status status = minimizeInit(x);
do {
status = minimizeOneStep(x);
iter++;
- } while (status==BFGSSpace::Success && iter < parameters.max_iters);
+ } while (status == BFGSSpace::Success && iter < parameters.max_iters);
return status;
}
-template<typename FunctorType> BFGSSpace::Status
-BFGS<FunctorType>::minimizeInit(FVectorType &x)
+template <typename FunctorType>
+BFGSSpace::Status
+BFGS<FunctorType>::minimizeInit(FVectorType& x)
{
iter = 0;
delta_f = 0;
- dx.setZero ();
+ dx.setZero();
functor.fdf(x, f, gradient);
x0 = x;
g0 = gradient;
- g0norm = g0.norm ();
- p = gradient * -1/g0norm;
- pnorm = p.norm ();
+ g0norm = g0.norm();
+ p = gradient * -1 / g0norm;
+ pnorm = p.norm();
fp0 = -g0norm;
{
- x_alpha = x0; x_cache_key = 0;
-
- f_alpha = f; f_cache_key = 0;
-
- g_alpha = g0; g_cache_key = 0;
-
- df_alpha = slope (); df_cache_key = 0;
+ x_alpha = x0;
+ x_cache_key = 0;
+
+ f_alpha = f;
+ f_cache_key = 0;
+
+ g_alpha = g0;
+ g_cache_key = 0;
+
+ df_alpha = slope();
+ df_cache_key = 0;
}
return BFGSSpace::NotStarted;
}
-template<typename FunctorType> BFGSSpace::Status
-BFGS<FunctorType>::minimizeOneStep(FVectorType &x)
+template <typename FunctorType>
+BFGSSpace::Status
+BFGS<FunctorType>::minimizeOneStep(FVectorType& x)
{
Scalar alpha = 0.0, alpha1;
Scalar f0 = f;
- if (pnorm == 0.0 || g0norm == 0.0 || fp0 == 0)
- {
- dx.setZero ();
+ if (pnorm == 0.0 || g0norm == 0.0 || fp0 == 0) {
+ dx.setZero();
return BFGSSpace::NoProgress;
}
- if (delta_f < 0)
- {
- Scalar del = std::max (-delta_f, 10 * std::numeric_limits<Scalar>::epsilon() * std::abs(f0));
- alpha1 = std::min (1.0, 2.0 * del / (-fp0));
+ if (delta_f < 0) {
+ Scalar del =
+ std::max(-delta_f, 10 * std::numeric_limits<Scalar>::epsilon() * std::abs(f0));
+ alpha1 = std::min(1.0, 2.0 * del / (-fp0));
}
else
alpha1 = std::abs(parameters.step_size);
- BFGSSpace::Status status = lineSearch(parameters.rho, parameters.sigma,
- parameters.tau1, parameters.tau2, parameters.tau3,
- parameters.order, alpha1, alpha);
+ BFGSSpace::Status status = lineSearch(parameters.rho,
+ parameters.sigma,
+ parameters.tau1,
+ parameters.tau2,
+ parameters.tau3,
+ parameters.order,
+ alpha1,
+ alpha);
- if(status != BFGSSpace::Success)
+ if (status != BFGSSpace::Success)
return status;
updatePosition(alpha, x, f, gradient);
/* dg0 = g - g0 */
dg0 = gradient - g0;
- dxg = dx0.dot (gradient);
- dgg = dg0.dot (gradient);
- dxdg = dx0.dot (dg0);
- dgnorm = dg0.norm ();
+ dxg = dx0.dot(gradient);
+ dgg = dg0.dot(gradient);
+ dxdg = dx0.dot(dg0);
+ dgnorm = dg0.norm();
- if (dxdg != 0)
- {
+ if (dxdg != 0) {
B = dxg / dxdg;
A = -(1.0 + dgnorm * dgnorm / dxdg) * B + dgg / dxdg;
}
- else
- {
+ else {
B = 0;
A = 0;
}
p = -A * dx0;
- p+= gradient;
- p+= -B * dg0 ;
+ p += gradient;
+ p += -B * dg0;
}
g0 = gradient;
x0 = x;
- g0norm = g0.norm ();
- pnorm = p.norm ();
-
- Scalar dir = ((p.dot (gradient)) > 0) ? -1.0 : 1.0;
- p*= dir / pnorm;
- pnorm = p.norm ();
- fp0 = p.dot (g0);
+ g0norm = g0.norm();
+ pnorm = p.norm();
+
+ Scalar dir = ((p.dot(gradient)) > 0) ? -1.0 : 1.0;
+ p *= dir / pnorm;
+ pnorm = p.norm();
+ fp0 = p.dot(g0);
changeDirection();
return BFGSSpace::Success;
return functor.checkGradient(gradient);
}
-template<typename FunctorType> typename BFGS<FunctorType>::Scalar
-BFGS<FunctorType>::interpolate (Scalar a, Scalar fa, Scalar fpa,
- Scalar b, Scalar fb, Scalar fpb,
- Scalar xmin, Scalar xmax,
- int order)
+template <typename FunctorType>
+typename BFGS<FunctorType>::Scalar
+BFGS<FunctorType>::interpolate(Scalar a,
+ Scalar fa,
+ Scalar fpa,
+ Scalar b,
+ Scalar fb,
+ Scalar fpb,
+ Scalar xmin,
+ Scalar xmax,
+ int order)
{
/* Map [a,b] to [0,1] */
Scalar y, alpha, ymin, ymax, fmin;
ymin = (xmin - a) / (b - a);
ymax = (xmax - a) / (b - a);
-
+
// Ensure ymin <= ymax
- if (ymin > ymax) { Scalar tmp = ymin; ymin = ymax; ymax = tmp; };
+ if (ymin > ymax) {
+ Scalar tmp = ymin;
+ ymin = ymax;
+ ymax = tmp;
+ };
- if (order > 2 && !(fpb != fpa) && fpb != std::numeric_limits<Scalar>::infinity ())
- {
+ if (order > 2 && !(fpb != fpa) && fpb != std::numeric_limits<Scalar>::infinity()) {
fpa = fpa * (b - a);
fpb = fpb * (b - a);
Scalar y0, y1;
Eigen::Matrix<Scalar, 4, 1> coefficients;
coefficients << c0, c1, c2, c3;
-
- y = ymin;
+
+ y = ymin;
// Evaluate the cubic polynomial at ymin;
- fmin = Eigen::poly_eval (coefficients, ymin);
- checkExtremum (coefficients, ymax, y, fmin);
+ fmin = Eigen::poly_eval(coefficients, ymin);
+ checkExtremum(coefficients, ymax, y, fmin);
{
// Solve quadratic polynomial for the derivative
Eigen::Matrix<Scalar, 3, 1> coefficients2;
coefficients2 << c1, 2 * c2, 3 * c3;
bool real_roots;
- Eigen::PolynomialSolver<Scalar, 2> solver (coefficients2, real_roots);
- if(real_roots)
- {
- if ((solver.roots ()).size () == 2) /* found 2 roots */
+ Eigen::PolynomialSolver<Scalar, 2> solver(coefficients2, real_roots);
+ if (real_roots) {
+ if ((solver.roots()).size() == 2) /* found 2 roots */
{
- y0 = std::real (solver.roots () [0]);
- y1 = std::real (solver.roots () [1]);
- if(y0 > y1) { Scalar tmp (y0); y0 = y1; y1 = tmp; }
- if (y0 > ymin && y0 < ymax)
- checkExtremum (coefficients, y0, y, fmin);
- if (y1 > ymin && y1 < ymax)
- checkExtremum (coefficients, y1, y, fmin);
+ y0 = std::real(solver.roots()[0]);
+ y1 = std::real(solver.roots()[1]);
+ if (y0 > y1) {
+ Scalar tmp(y0);
+ y0 = y1;
+ y1 = tmp;
+ }
+ if (y0 > ymin && y0 < ymax)
+ checkExtremum(coefficients, y0, y, fmin);
+ if (y1 > ymin && y1 < ymax)
+ checkExtremum(coefficients, y1, y, fmin);
}
- else if ((solver.roots ()).size () == 1) /* found 1 root */
+ else if ((solver.roots()).size() == 1) /* found 1 root */
{
- y0 = std::real (solver.roots () [0]);
- if (y0 > ymin && y0 < ymax)
- checkExtremum (coefficients, y0, y, fmin);
+ y0 = std::real(solver.roots()[0]);
+ if (y0 > ymin && y0 < ymax)
+ checkExtremum(coefficients, y0, y, fmin);
}
}
}
- }
- else
- {
+ }
+ else {
fpa = fpa * (b - a);
- Scalar fl = fa + ymin*(fpa + ymin*(fb - fa -fpa));
- Scalar fh = fa + ymax*(fpa + ymax*(fb - fa -fpa));
- Scalar c = 2 * (fb - fa - fpa); /* curvature */
- y = ymin; fmin = fl;
-
- if (fh < fmin) { y = ymax; fmin = fh; }
-
- if (c > a) /* positive curvature required for a minimum */
+ Scalar fl = fa + ymin * (fpa + ymin * (fb - fa - fpa));
+ Scalar fh = fa + ymax * (fpa + ymax * (fb - fa - fpa));
+ Scalar c = 2 * (fb - fa - fpa); /* curvature */
+ y = ymin;
+ fmin = fl;
+
+ if (fh < fmin) {
+ y = ymax;
+ fmin = fh;
+ }
+
+ if (c > a) /* positive curvature required for a minimum */
{
- Scalar z = -fpa / c; /* location of minimum */
+ Scalar z = -fpa / c; /* location of minimum */
if (z > ymin && z < ymax) {
- Scalar f = fa + z*(fpa + z*(fb - fa -fpa));
- if (f < fmin) { y = z; fmin = f; };
+ Scalar f = fa + z * (fpa + z * (fb - fa - fpa));
+ if (f < fmin) {
+ y = z;
+ fmin = f;
+ };
}
}
}
-
+
alpha = a + y * (b - a);
return alpha;
}
-template<typename FunctorType> BFGSSpace::Status
-BFGS<FunctorType>::lineSearch(Scalar rho, Scalar sigma,
- Scalar tau1, Scalar tau2, Scalar tau3,
- int order, Scalar alpha1, Scalar &alpha_new)
+template <typename FunctorType>
+BFGSSpace::Status
+BFGS<FunctorType>::lineSearch(Scalar rho,
+ Scalar sigma,
+ Scalar tau1,
+ Scalar tau2,
+ Scalar tau3,
+ int order,
+ Scalar alpha1,
+ Scalar& alpha_new)
{
Scalar f0, fp0, falpha, falpha_prev, fpalpha, fpalpha_prev, delta, alpha_next;
Scalar alpha = alpha1, alpha_prev = 0.0;
Scalar a, b, fa, fb, fpa, fpb;
Index i = 0;
- applyFDF (0.0, f0, fp0);
+ applyFDF(0.0, f0, fp0);
falpha_prev = f0;
fpalpha_prev = fp0;
/* Avoid uninitialized variables warning */
- a = 0.0; b = alpha;
- fa = f0; fb = 0.0;
- fpa = fp0; fpb = 0.0;
-
- /* Begin bracketing */
-
- while (i++ < parameters.bracket_iters)
- {
- falpha = applyF (alpha);
-
- if (falpha > f0 + alpha * rho * fp0 || falpha >= falpha_prev)
- {
- a = alpha_prev; fa = falpha_prev; fpa = fpalpha_prev;
- b = alpha; fb = falpha; fpb = std::numeric_limits<Scalar>::quiet_NaN ();
+ a = 0.0;
+ b = alpha;
+ fa = f0;
+ fb = 0.0;
+ fpa = fp0;
+ fpb = 0.0;
+
+ /* Begin bracketing */
+
+ while (i++ < parameters.bracket_iters) {
+ falpha = applyF(alpha);
+
+ if (falpha > f0 + alpha * rho * fp0 || falpha >= falpha_prev) {
+ a = alpha_prev;
+ fa = falpha_prev;
+ fpa = fpalpha_prev;
+ b = alpha;
+ fb = falpha;
+ fpb = std::numeric_limits<Scalar>::quiet_NaN();
break;
- }
+ }
- fpalpha = applyDF (alpha);
+ fpalpha = applyDF(alpha);
/* Fletcher's sigma test */
- if (std::abs (fpalpha) <= -sigma * fp0)
- {
+ if (std::abs(fpalpha) <= -sigma * fp0) {
alpha_new = alpha;
return BFGSSpace::Success;
}
- if (fpalpha >= 0)
- {
- a = alpha; fa = falpha; fpa = fpalpha;
- b = alpha_prev; fb = falpha_prev; fpb = fpalpha_prev;
- break; /* goto sectioning */
+ if (fpalpha >= 0) {
+ a = alpha;
+ fa = falpha;
+ fpa = fpalpha;
+ b = alpha_prev;
+ fb = falpha_prev;
+ fpb = fpalpha_prev;
+ break; /* goto sectioning */
}
delta = alpha - alpha_prev;
Scalar lower = alpha + delta;
Scalar upper = alpha + tau1 * delta;
- alpha_next = interpolate (alpha_prev, falpha_prev, fpalpha_prev,
- alpha, falpha, fpalpha, lower, upper, order);
-
+ alpha_next = interpolate(alpha_prev,
+ falpha_prev,
+ fpalpha_prev,
+ alpha,
+ falpha,
+ fpalpha,
+ lower,
+ upper,
+ order);
}
alpha_prev = alpha;
alpha = alpha_next;
}
/* Sectioning of bracket [a,b] */
- while (i++ < parameters.section_iters)
- {
+ while (i++ < parameters.section_iters) {
delta = b - a;
-
+
{
Scalar lower = a + tau2 * delta;
Scalar upper = b - tau3 * delta;
-
- alpha = interpolate (a, fa, fpa, b, fb, fpb, lower, upper, order);
+
+ alpha = interpolate(a, fa, fpa, b, fb, fpb, lower, upper, order);
}
- falpha = applyF (alpha);
- if ((a-alpha)*fpa <= std::numeric_limits<Scalar>::epsilon ()) {
+ falpha = applyF(alpha);
+ if ((a - alpha) * fpa <= std::numeric_limits<Scalar>::epsilon()) {
/* roundoff prevents progress */
return BFGSSpace::NoProgress;
};
- if (falpha > f0 + rho * alpha * fp0 || falpha >= fa)
- {
+ if (falpha > f0 + rho * alpha * fp0 || falpha >= fa) {
/* a_next = a; */
- b = alpha; fb = falpha; fpb = std::numeric_limits<Scalar>::quiet_NaN ();
+ b = alpha;
+ fb = falpha;
+ fpb = std::numeric_limits<Scalar>::quiet_NaN();
}
- else
- {
- fpalpha = applyDF (alpha);
-
- if (std::abs(fpalpha) <= -sigma * fp0)
- {
+ else {
+ fpalpha = applyDF(alpha);
+
+ if (std::abs(fpalpha) <= -sigma * fp0) {
alpha_new = alpha;
- return BFGSSpace::Success; /* terminate */
+ return BFGSSpace::Success; /* terminate */
}
-
- if ( ((b-a) >= 0 && fpalpha >= 0) || ((b-a) <=0 && fpalpha <= 0))
- {
- b = a; fb = fa; fpb = fpa;
- a = alpha; fa = falpha; fpa = fpalpha;
+
+ if (((b - a) >= 0 && fpalpha >= 0) || ((b - a) <= 0 && fpalpha <= 0)) {
+ b = a;
+ fb = fa;
+ fpb = fpa;
+ a = alpha;
+ fa = falpha;
+ fpa = fpalpha;
}
- else
- {
- a = alpha; fa = falpha; fpa = fpalpha;
+ else {
+ a = alpha;
+ fa = falpha;
+ fpa = fpalpha;
}
}
}
*/
#pragma once
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#if defined __GNUC__
-# pragma GCC system_header
+#pragma GCC system_header
#endif
//#include <boost/graph/adjacency_list.hpp>
-#include <boost/graph/graph_traits.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
-#include <boost/property_map/property_map.hpp>
-
+#include <boost/graph/graph_traits.hpp>
#include <boost/noncopyable.hpp>
+#include <boost/property_map/property_map.hpp>
#pragma once
#include <boost/graph/adjacency_list.hpp>
+
#include <Eigen/StdVector>
+
#include <list>
-namespace boost
-{
+namespace boost {
- struct eigen_vecS
- {
- };
+struct eigen_vecS {};
- template <class ValueType>
- struct container_gen<eigen_vecS, ValueType>
- {
- using type = std::vector<ValueType, Eigen::aligned_allocator<ValueType> >;
- };
+template <class ValueType>
+struct container_gen<eigen_vecS, ValueType> {
+ using type = std::vector<ValueType, Eigen::aligned_allocator<ValueType>>;
+};
- template <>
- struct parallel_edge_traits<eigen_vecS>
- {
- using type = allow_parallel_edge_tag;
- };
+template <>
+struct parallel_edge_traits<eigen_vecS> {
+ using type = allow_parallel_edge_tag;
+};
- namespace detail
- {
- template <>
- struct is_random_access<eigen_vecS>
- {
- enum { value = true };
- using type = mpl::true_;
- };
- }
+namespace detail {
+template <>
+struct is_random_access<eigen_vecS> {
+ enum { value = true };
+ using type = mpl::true_;
+};
+} // namespace detail
- struct eigen_listS
- {
- };
+struct eigen_listS {};
- template <class ValueType>
- struct container_gen<eigen_listS, ValueType>
- {
- using type = std::list<ValueType, Eigen::aligned_allocator<ValueType> >;
- };
+template <class ValueType>
+struct container_gen<eigen_listS, ValueType> {
+ using type = std::list<ValueType, Eigen::aligned_allocator<ValueType>>;
+};
- template <>
- struct parallel_edge_traits<eigen_listS>
- {
- using type = allow_parallel_edge_tag;
- };
+template <>
+struct parallel_edge_traits<eigen_listS> {
+ using type = allow_parallel_edge_tag;
+};
- namespace detail
- {
- template <>
- struct is_random_access<eigen_listS>
- {
- enum { value = false };
- using type = mpl::false_;
- };
- }
-}
+namespace detail {
+template <>
+struct is_random_access<eigen_listS> {
+ enum { value = false };
+ using type = mpl::false_;
+};
+} // namespace detail
+} // namespace boost
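// Illustrative sketch (hypothetical type and function names): with the eigen_vecS
// selector defined above, a boost::adjacency_list can keep Eigen vertex properties in
// an aligned std::vector, which is what these container_gen specializations enable.
// Assumes <boost/graph/adjacency_list.hpp> and <Eigen/Core> are available (both are
// pulled in by the includes above).
using AlignedGraphSketch = boost::adjacency_list<boost::eigen_vecS, // out-edge list
                                                 boost::eigen_vecS, // vertex list
                                                 boost::undirectedS,
                                                 Eigen::Vector4f>; // bundled vertex property

inline void
eigenVecSUsageSketch()
{
  AlignedGraphSketch graph;
  const auto v0 = boost::add_vertex(Eigen::Vector4f(0.f, 0.f, 0.f, 1.f), graph);
  const auto v1 = boost::add_vertex(Eigen::Vector4f(1.f, 0.f, 0.f, 1.f), graph);
  boost::add_edge(v0, v1, graph); // parallel edges allowed per parallel_edge_traits
}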
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b ConvergenceCriteria represents an abstract base class for
- * different convergence criteria used in registration loops.
- *
- * This should be used as part of an Iterative Closest Point (ICP)-like
- * method, to verify if the algorithm has reached convergence.
- *
- * Typical convergence criteria that could inherit from this include:
- *
- * * a maximum number of iterations has been reached
- * * the transformation (R, t) cannot be further updated (the difference between current and previous is smaller than a threshold)
- * * the Mean Squared Error (MSE) between the current set of correspondences and the previous one is smaller than some threshold
- *
- * \author Radu B. Rusu
- * \ingroup registration
- */
- class PCL_EXPORTS ConvergenceCriteria
- {
- public:
- using Ptr = shared_ptr<ConvergenceCriteria>;
- using ConstPtr = shared_ptr<const ConvergenceCriteria>;
+namespace pcl {
+namespace registration {
+/** \brief @b ConvergenceCriteria represents an abstract base class for
+ * different convergence criteria used in registration loops.
+ *
+ * This should be used as part of an Iterative Closest Point (ICP)-like
+ * method, to verify if the algorithm has reached convergence.
+ *
+ * Typical convergence criteria that could inherit from this include:
+ *
+ * * a maximum number of iterations has been reached
+ * * the transformation (R, t) cannot be further updated (the difference between
+ * current and previous is smaller than a threshold)
+ * * the Mean Squared Error (MSE) between the current set of correspondences and the
+ * previous one is smaller than some threshold
+ *
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+class PCL_EXPORTS ConvergenceCriteria {
+public:
+ using Ptr = shared_ptr<ConvergenceCriteria>;
+ using ConstPtr = shared_ptr<const ConvergenceCriteria>;
- /** \brief Empty constructor. */
- ConvergenceCriteria () {}
+ /** \brief Empty constructor. */
+ ConvergenceCriteria() {}
- /** \brief Empty destructor. */
- virtual ~ConvergenceCriteria () {}
+ /** \brief Empty destructor. */
+ virtual ~ConvergenceCriteria() {}
- /** \brief Check if convergence has been reached. Pure virtual. */
- virtual bool
- hasConverged () = 0;
+ /** \brief Check if convergence has been reached. Pure virtual. */
+ virtual bool
+ hasConverged() = 0;
- /** \brief Bool operator. */
- operator bool ()
- {
- return (hasConverged ());
- }
- };
- }
-}
+ /** \brief Bool operator. */
+ operator bool() { return (hasConverged()); }
+};
+} // namespace registration
+} // namespace pcl
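// Illustrative sketch (hypothetical class, not part of the header above): a minimal
// criterion derived from pcl::registration::ConvergenceCriteria that stops an
// ICP-like loop after a fixed number of iterations.
#include <pcl/registration/convergence_criteria.h> // path assumed for the header above

class MaxIterationsCriterion : public pcl::registration::ConvergenceCriteria {
public:
  explicit MaxIterationsCriterion(int max_iterations) : max_iterations_(max_iterations) {}

  void
  incrementIteration()
  {
    ++iteration_;
  }

  bool
  hasConverged() override
  {
    return (iteration_ >= max_iterations_);
  }

private:
  int iteration_ = 0;
  int max_iterations_;
};

// Usage: operator bool() above forwards to hasConverged(), so the criterion can drive
// a loop directly, e.g.
//   MaxIterationsCriterion criterion (50);
//   do { /* one ICP iteration */ criterion.incrementIteration (); } while (!criterion);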
#pragma once
-#include <string>
-
-#include <pcl/pcl_base.h>
-#include <pcl/common/transforms.h>
+#include <pcl/common/io.h> // for getFields
+#include <pcl/registration/correspondence_types.h>
#include <pcl/search/kdtree.h>
#include <pcl/memory.h>
+#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
-#include <pcl/registration/correspondence_types.h>
+#include <string>
+
+namespace pcl {
+namespace registration {
+/** \brief Abstract @b CorrespondenceEstimationBase class.
+ * All correspondence estimation methods should inherit from this.
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class CorrespondenceEstimationBase : public PCLBase<PointSource> {
+public:
+ using Ptr =
+ shared_ptr<CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>>;
+
+ // using PCLBase<PointSource>::initCompute;
+ using PCLBase<PointSource>::deinitCompute;
+ using PCLBase<PointSource>::input_;
+ using PCLBase<PointSource>::indices_;
+ using PCLBase<PointSource>::setIndices;
+
+ using KdTree = pcl::search::KdTree<PointTarget>;
+ using KdTreePtr = typename KdTree::Ptr;
+
+ using KdTreeReciprocal = pcl::search::KdTree<PointSource>;
+ using KdTreeReciprocalPtr = typename KdTree::Ptr;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using PointRepresentationConstPtr = typename KdTree::PointRepresentationConstPtr;
+
+ /** \brief Empty constructor. */
+ CorrespondenceEstimationBase()
+ : corr_name_("CorrespondenceEstimationBase")
+ , tree_(new pcl::search::KdTree<PointTarget>)
+ , tree_reciprocal_(new pcl::search::KdTree<PointSource>)
+ , target_()
+ , point_representation_()
+ , input_transformed_()
+ , target_cloud_updated_(true)
+ , source_cloud_updated_(true)
+ , force_no_recompute_(false)
+ , force_no_recompute_reciprocal_(false)
+ {}
+
+ /** \brief Empty destructor */
+ ~CorrespondenceEstimationBase() {}
+
+ /** \brief Provide a pointer to the input source
+ * (e.g., the point cloud that we want to align to the target)
+ *
+ * \param[in] cloud the input point cloud source
+ */
+ inline void
+ setInputSource(const PointCloudSourceConstPtr& cloud)
+ {
+ source_cloud_updated_ = true;
+ PCLBase<PointSource>::setInputCloud(cloud);
+ input_fields_ = pcl::getFields<PointSource>();
+ }
+
+ /** \brief Get a pointer to the input source point cloud dataset. */
+ inline PointCloudSourceConstPtr const
+ getInputSource()
+ {
+ return (input_);
+ }
+
+ /** \brief Provide a pointer to the input target
+ * (e.g., the point cloud that we want to align the input source to)
+ * \param[in] cloud the input point cloud target
+ */
+ inline void
+ setInputTarget(const PointCloudTargetConstPtr& cloud);
+
+ /** \brief Get a pointer to the input point cloud dataset target. */
+ inline PointCloudTargetConstPtr const
+ getInputTarget()
+ {
+ return (target_);
+ }
+
+ /** \brief See if this rejector requires source normals */
+ virtual bool
+ requiresSourceNormals() const
+ {
+ return (false);
+ }
+
+ /** \brief Abstract method for setting the source normals */
+ virtual void setSourceNormals(pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
+ {
+ PCL_WARN("[pcl::registration::%s::setSourceNormals] This class does not require "
+ "input source normals\n",
+ getClassName().c_str());
+ }
+
+ /** \brief See if this rejector requires target normals */
+ virtual bool
+ requiresTargetNormals() const
+ {
+ return (false);
+ }
+
+ /** \brief Abstract method for setting the target normals */
+ virtual void setTargetNormals(pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
+ {
+ PCL_WARN("[pcl::registration::%s::setTargetNormals] This class does not require "
+ "input target normals\n",
+ getClassName().c_str());
+ }
+
+ /** \brief Provide a pointer to the vector of indices that represent the
+ * input source point cloud.
+ * \param[in] indices a pointer to the vector of indices
+ */
+ inline void
+ setIndicesSource(const IndicesPtr& indices)
+ {
+ setIndices(indices);
+ }
+
+ /** \brief Get a pointer to the vector of indices used for the source dataset. */
+ inline IndicesPtr const
+ getIndicesSource()
+ {
+ return (indices_);
+ }
+
+ /** \brief Provide a pointer to the vector of indices that represent the input target
+ * point cloud.
+ * \param[in] indices a pointer to the vector of indices
+ */
+ inline void
+ setIndicesTarget(const IndicesPtr& indices)
+ {
+ target_cloud_updated_ = true;
+ target_indices_ = indices;
+ }
+
+ /** \brief Get a pointer to the vector of indices used for the target dataset. */
+ inline IndicesPtr const
+ getIndicesTarget()
+ {
+ return (target_indices_);
+ }
-namespace pcl
-{
- namespace registration
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the target cloud.
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputTarget. Only use if you are
+ * confident that the tree will be set correctly.
+ */
+ inline void
+ setSearchMethodTarget(const KdTreePtr& tree, bool force_no_recompute = false)
+ {
+ tree_ = tree;
+ force_no_recompute_ = force_no_recompute;
+ // Since we just set a new tree, we need to check for updates
+ target_cloud_updated_ = true;
+ }
+
+ /** \brief Get a pointer to the search method used to find correspondences in the
+ * target cloud. */
+ inline KdTreePtr
+ getSearchMethodTarget() const
+ {
+ return (tree_);
+ }
+
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the source cloud (usually used by reciprocal correspondence finding).
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputSource. Only use if you are
+ * extremely confident that the tree will be set correctly.
+ */
+ inline void
+ setSearchMethodSource(const KdTreeReciprocalPtr& tree,
+ bool force_no_recompute = false)
+ {
+ tree_reciprocal_ = tree;
+ force_no_recompute_reciprocal_ = force_no_recompute;
+ // Since we just set a new tree, we need to check for updates
+ source_cloud_updated_ = true;
+ }
+
+ /** \brief Get a pointer to the search method used to find correspondences in the
+ * source cloud. */
+ inline KdTreeReciprocalPtr
+ getSearchMethodSource() const
+ {
+ return (tree_reciprocal_);
+ }
+
+ /** \brief Determine the correspondences between input and target cloud.
+ * \param[out] correspondences the found correspondences (index of query point,
+ * index of target point, distance)
+ * \param[in] max_distance maximum allowed distance between correspondences
+ */
+ virtual void
+ determineCorrespondences(
+ pcl::Correspondences& correspondences,
+ double max_distance = std::numeric_limits<double>::max()) = 0;
+
+ /** \brief Determine the reciprocal correspondences between input and target cloud.
+ * A correspondence is considered reciprocal if Src_i has Tgt_i as a
+ * correspondence and Tgt_i has Src_i as one.
+ *
+ * \param[out] correspondences the found correspondences (index of query and target
+ * point, distance)
+ * \param[in] max_distance maximum allowed distance between correspondences
+ */
+ virtual void
+ determineReciprocalCorrespondences(
+ pcl::Correspondences& correspondences,
+ double max_distance = std::numeric_limits<double>::max()) = 0;
+
+ /** \brief Provide a boost shared pointer to the PointRepresentation to be used
+ * when searching for nearest neighbors.
+ *
+ * \param[in] point_representation the PointRepresentation to be used by the
+ * k-D tree for nearest neighbor search
+ */
+ inline void
+ setPointRepresentation(const PointRepresentationConstPtr& point_representation)
+ {
+ point_representation_ = point_representation;
+ }
+
+ /** \brief Clone and cast to CorrespondenceEstimationBase */
+ virtual typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
+ clone() const = 0;
+
+protected:
+ /** \brief The correspondence estimation method name. */
+ std::string corr_name_;
+
+ /** \brief A pointer to the spatial search object used for the target dataset. */
+ KdTreePtr tree_;
+
+ /** \brief A pointer to the spatial search object used for the source dataset. */
+ KdTreeReciprocalPtr tree_reciprocal_;
+
+ /** \brief The input point cloud dataset target. */
+ PointCloudTargetConstPtr target_;
+
+ /** \brief The target point cloud dataset indices. */
+ IndicesPtr target_indices_;
+
+ /** \brief The point representation used (internal). */
+ PointRepresentationConstPtr point_representation_;
+
+ /** \brief The transformed input source point cloud dataset. */
+ PointCloudTargetPtr input_transformed_;
+
+ /** \brief The types of input point fields available. */
+ std::vector<pcl::PCLPointField> input_fields_;
+
+ /** \brief Abstract class get name method. */
+ inline const std::string&
+ getClassName() const
+ {
+ return (corr_name_);
+ }
+
+ /** \brief Internal computation initialization. */
+ bool
+ initCompute();
+
+ /** \brief Internal computation initialization for reciprocal correspondences. */
+ bool
+ initComputeReciprocal();
+
+ /** \brief Variable that stores whether we have a new target cloud, meaning we need to
+ * pre-process it again. This way, we avoid rebuilding the kd-tree for the target
+ * cloud every time the determineCorrespondences () method is called. */
+ bool target_cloud_updated_;
+ /** \brief Variable that stores whether we have a new source cloud, meaning we need to
+ * pre-process it again. This way, we avoid rebuilding the reciprocal kd-tree for the
+ * source cloud every time the determineCorrespondences () method is called. */
+ bool source_cloud_updated_;
+ /** \brief A flag which, if set, means the tree operating on the target cloud
+ * will never be recomputed*/
+ bool force_no_recompute_;
+
+ /** \brief A flag which, if set, means the tree operating on the source cloud
+ * will never be recomputed*/
+ bool force_no_recompute_reciprocal_;
+};
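// Illustrative sketch (hypothetical helper name): the PCLPointCloud2 overloads above
// allow normals to be handed over in type-erased form; estimators that actually need
// them (see the back-projection estimator further below) convert the blob back with
// fromPCLPointCloud2. Assumes <pcl/point_types.h> and <pcl/conversions.h>.
inline void
passNormalsAsBlobSketch(
    pcl::registration::CorrespondenceEstimationBase<pcl::PointXYZ, pcl::PointXYZ>& est,
    const pcl::PointCloud<pcl::Normal>::ConstPtr& source_normals)
{
  if (est.requiresSourceNormals()) {
    pcl::PCLPointCloud2::Ptr blob(new pcl::PCLPointCloud2);
    pcl::toPCLPointCloud2(*source_normals, *blob);
    est.setSourceNormals(blob); // dispatched to the derived class implementation
  }
}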
+
+/** \brief @b CorrespondenceEstimation represents the base class for
+ * determining correspondences between target and query point
+ * sets/features.
+ *
+ * Code example:
+ *
+ * \code
+ * pcl::PointCloud<pcl::PointXYZRGBA>::Ptr source, target;
+ * // ... read or fill in source and target
+ * pcl::CorrespondenceEstimation<pcl::PointXYZ, pcl::PointXYZ> est;
+ * est.setInputSource (source);
+ * est.setInputTarget (target);
+ *
+ * pcl::Correspondences all_correspondences;
+ * // Determine all reciprocal correspondences
+ * est.determineReciprocalCorrespondences (all_correspondences);
+ * \endcode
+ *
+ * \author Radu B. Rusu, Michael Dixon, Dirk Holz
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class CorrespondenceEstimation
+: public CorrespondenceEstimationBase<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<CorrespondenceEstimation<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const CorrespondenceEstimation<PointSource, PointTarget, Scalar>>;
+
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ point_representation_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ input_transformed_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ tree_reciprocal_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::corr_name_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_indices_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getClassName;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ initComputeReciprocal;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::input_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::indices_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::input_fields_;
+ using PCLBase<PointSource>::deinitCompute;
+
+ using KdTree = pcl::search::KdTree<PointTarget>;
+ using KdTreePtr = typename KdTree::Ptr;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using PointRepresentationConstPtr = typename KdTree::PointRepresentationConstPtr;
+
+ /** \brief Empty constructor. */
+ CorrespondenceEstimation() { corr_name_ = "CorrespondenceEstimation"; }
+
+ /** \brief Empty destructor */
+ ~CorrespondenceEstimation() {}
+
+ /** \brief Determine the correspondences between input and target cloud.
+ * \param[out] correspondences the found correspondences (index of query point,
+ * index of target point, distance)
+ * \param[in] max_distance maximum allowed distance between correspondences
+ */
+ void
+ determineCorrespondences(
+ pcl::Correspondences& correspondences,
+ double max_distance = std::numeric_limits<double>::max()) override;
+
+ /** \brief Determine the reciprocal correspondences between input and target cloud.
+ * A correspondence is considered reciprocal if Src_i has Tgt_i as a
+ * correspondence and Tgt_i has Src_i as one.
+ *
+ * \param[out] correspondences the found correspondences (index of query and target
+ * point, distance)
+ * \param[in] max_distance maximum allowed distance between correspondences
+ */
+ void
+ determineReciprocalCorrespondences(
+ pcl::Correspondences& correspondences,
+ double max_distance = std::numeric_limits<double>::max()) override;
+
+ /** \brief Clone and cast to CorrespondenceEstimationBase */
+ typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
+ clone() const override
{
- /** \brief Abstract @b CorrespondenceEstimationBase class.
- * All correspondence estimation methods should inherit from this.
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class CorrespondenceEstimationBase: public PCLBase<PointSource>
- {
- public:
- using Ptr = shared_ptr<CorrespondenceEstimationBase<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const CorrespondenceEstimationBase<PointSource, PointTarget, Scalar> >;
-
- // using PCLBase<PointSource>::initCompute;
- using PCLBase<PointSource>::deinitCompute;
- using PCLBase<PointSource>::input_;
- using PCLBase<PointSource>::indices_;
- using PCLBase<PointSource>::setIndices;
-
- using KdTree = pcl::search::KdTree<PointTarget>;
- using KdTreePtr = typename KdTree::Ptr;
-
- using KdTreeReciprocal = pcl::search::KdTree<PointSource>;
- using KdTreeReciprocalPtr = typename KdTree::Ptr;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using PointRepresentationConstPtr = typename KdTree::PointRepresentationConstPtr;
-
- /** \brief Empty constructor. */
- CorrespondenceEstimationBase ()
- : corr_name_ ("CorrespondenceEstimationBase")
- , tree_ (new pcl::search::KdTree<PointTarget>)
- , tree_reciprocal_ (new pcl::search::KdTree<PointSource>)
- , target_ ()
- , point_representation_ ()
- , input_transformed_ ()
- , target_cloud_updated_ (true)
- , source_cloud_updated_ (true)
- , force_no_recompute_ (false)
- , force_no_recompute_reciprocal_ (false)
- {
- }
-
- /** \brief Empty destructor */
- ~CorrespondenceEstimationBase () {}
-
- /** \brief Provide a pointer to the input source
- * (e.g., the point cloud that we want to align to the target)
- *
- * \param[in] cloud the input point cloud source
- */
- inline void
- setInputSource (const PointCloudSourceConstPtr &cloud)
- {
- source_cloud_updated_ = true;
- PCLBase<PointSource>::setInputCloud (cloud);
- input_fields_ = pcl::getFields<PointSource> ();
- }
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- inline PointCloudSourceConstPtr const
- getInputSource ()
- {
- return (input_ );
- }
-
- /** \brief Provide a pointer to the input target
- * (e.g., the point cloud that we want to align the input source to)
- * \param[in] cloud the input point cloud target
- */
- inline void
- setInputTarget (const PointCloudTargetConstPtr &cloud);
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- inline PointCloudTargetConstPtr const
- getInputTarget () { return (target_ ); }
-
-
- /** \brief See if this rejector requires source normals */
- virtual bool
- requiresSourceNormals () const
- { return (false); }
-
- /** \brief Abstract method for setting the source normals */
- virtual void
- setSourceNormals (pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
- {
- PCL_WARN ("[pcl::registration::%s::setSourceNormals] This class does not require input source normals", getClassName ().c_str ());
- }
-
- /** \brief See if this rejector requires target normals */
- virtual bool
- requiresTargetNormals () const
- { return (false); }
-
- /** \brief Abstract method for setting the target normals */
- virtual void
- setTargetNormals (pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
- {
- PCL_WARN ("[pcl::registration::%s::setTargetNormals] This class does not require input target normals", getClassName ().c_str ());
- }
-
- /** \brief Provide a pointer to the vector of indices that represent the
- * input source point cloud.
- * \param[in] indices a pointer to the vector of indices
- */
- inline void
- setIndicesSource (const IndicesPtr &indices)
- {
- setIndices (indices);
- }
-
- /** \brief Get a pointer to the vector of indices used for the source dataset. */
- inline IndicesPtr const
- getIndicesSource () { return (indices_); }
-
- /** \brief Provide a pointer to the vector of indices that represent the input target point cloud.
- * \param[in] indices a pointer to the vector of indices
- */
- inline void
- setIndicesTarget (const IndicesPtr &indices)
- {
- target_cloud_updated_ = true;
- target_indices_ = indices;
- }
-
- /** \brief Get a pointer to the vector of indices used for the target dataset. */
- inline IndicesPtr const
- getIndicesTarget () { return (target_indices_); }
-
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the target cloud.
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputTarget. Only use if you are
- * confident that the tree will be set correctly.
- */
- inline void
- setSearchMethodTarget (const KdTreePtr &tree,
- bool force_no_recompute = false)
- {
- tree_ = tree;
- if (force_no_recompute)
- {
- force_no_recompute_ = true;
- }
- // Since we just set a new tree, we need to check for updates
- target_cloud_updated_ = true;
- }
-
- /** \brief Get a pointer to the search method used to find correspondences in the
- * target cloud. */
- inline KdTreePtr
- getSearchMethodTarget () const
- {
- return (tree_);
- }
-
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the source cloud (usually used by reciprocal correspondence finding).
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputSource. Only use if you are
- * extremely confident that the tree will be set correctly.
- */
- inline void
- setSearchMethodSource (const KdTreeReciprocalPtr &tree,
- bool force_no_recompute = false)
- {
- tree_reciprocal_ = tree;
- if ( force_no_recompute )
- {
- force_no_recompute_reciprocal_ = true;
- }
- // Since we just set a new tree, we need to check for updates
- source_cloud_updated_ = true;
- }
-
- /** \brief Get a pointer to the search method used to find correspondences in the
- * source cloud. */
- inline KdTreeReciprocalPtr
- getSearchMethodSource () const
- {
- return (tree_reciprocal_);
- }
-
- /** \brief Determine the correspondences between input and target cloud.
- * \param[out] correspondences the found correspondences (index of query point, index of target point, distance)
- * \param[in] max_distance maximum allowed distance between correspondences
- */
- virtual void
- determineCorrespondences (pcl::Correspondences &correspondences,
- double max_distance = std::numeric_limits<double>::max ()) = 0;
-
- /** \brief Determine the reciprocal correspondences between input and target cloud.
- * A correspondence is considered reciprocal if both Src_i has Tgt_i as a
- * correspondence, and Tgt_i has Src_i as one.
- *
- * \param[out] correspondences the found correspondences (index of query and target point, distance)
- * \param[in] max_distance maximum allowed distance between correspondences
- */
- virtual void
- determineReciprocalCorrespondences (pcl::Correspondences &correspondences,
- double max_distance = std::numeric_limits<double>::max ()) = 0;
-
- /** \brief Provide a boost shared pointer to the PointRepresentation to be used
- * when searching for nearest neighbors.
- *
- * \param[in] point_representation the PointRepresentation to be used by the
- * k-D tree for nearest neighbor search
- */
- inline void
- setPointRepresentation (const PointRepresentationConstPtr &point_representation)
- {
- point_representation_ = point_representation;
- }
-
- /** \brief Clone and cast to CorrespondenceEstimationBase */
- virtual typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr clone () const = 0;
-
- protected:
- /** \brief The correspondence estimation method name. */
- std::string corr_name_;
-
- /** \brief A pointer to the spatial search object used for the target dataset. */
- KdTreePtr tree_;
-
- /** \brief A pointer to the spatial search object used for the source dataset. */
- KdTreeReciprocalPtr tree_reciprocal_;
-
-
-
- /** \brief The input point cloud dataset target. */
- PointCloudTargetConstPtr target_;
-
- /** \brief The target point cloud dataset indices. */
- IndicesPtr target_indices_;
-
- /** \brief The point representation used (internal). */
- PointRepresentationConstPtr point_representation_;
-
- /** \brief The transformed input source point cloud dataset. */
- PointCloudTargetPtr input_transformed_;
-
- /** \brief The types of input point fields available. */
- std::vector<pcl::PCLPointField> input_fields_;
-
- /** \brief Abstract class get name method. */
- inline const std::string&
- getClassName () const { return (corr_name_); }
-
- /** \brief Internal computation initialization. */
- bool
- initCompute ();
-
- /** \brief Internal computation initialization for reciprocal correspondences. */
- bool
- initComputeReciprocal ();
-
- /** \brief Variable that stores whether we have a new target cloud, meaning we need to pre-process it again.
- * This way, we avoid rebuilding the kd-tree for the target cloud every time the determineCorrespondences () method
- * is called. */
- bool target_cloud_updated_;
- /** \brief Variable that stores whether we have a new source cloud, meaning we need to pre-process it again.
- * This way, we avoid rebuilding the reciprocal kd-tree for the source cloud every time the determineCorrespondences () method
- * is called. */
- bool source_cloud_updated_;
- /** \brief A flag which, if set, means the tree operating on the target cloud
- * will never be recomputed*/
- bool force_no_recompute_;
-
- /** \brief A flag which, if set, means the tree operating on the source cloud
- * will never be recomputed*/
- bool force_no_recompute_reciprocal_;
-
- };
-
- /** \brief @b CorrespondenceEstimation represents the base class for
- * determining correspondences between target and query point
- * sets/features.
- *
- * Code example:
- *
- * \code
- * pcl::PointCloud<pcl::PointXYZRGBA>::Ptr source, target;
- * // ... read or fill in source and target
- * pcl::CorrespondenceEstimation<pcl::PointXYZ, pcl::PointXYZ> est;
- * est.setInputSource (source);
- * est.setInputTarget (target);
- *
- * pcl::Correspondences all_correspondences;
- * // Determine all reciprocal correspondences
- * est.determineReciprocalCorrespondences (all_correspondences);
- * \endcode
- *
- * \author Radu B. Rusu, Michael Dixon, Dirk Holz
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class CorrespondenceEstimation : public CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<CorrespondenceEstimation<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const CorrespondenceEstimation<PointSource, PointTarget, Scalar> >;
-
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::point_representation_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::input_transformed_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_reciprocal_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::corr_name_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_indices_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getClassName;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initComputeReciprocal;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::input_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::indices_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::input_fields_;
- using PCLBase<PointSource>::deinitCompute;
-
- using KdTree = pcl::search::KdTree<PointTarget>;
- using KdTreePtr = typename KdTree::Ptr;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using PointRepresentationConstPtr = typename KdTree::PointRepresentationConstPtr;
-
- /** \brief Empty constructor. */
- CorrespondenceEstimation ()
- {
- corr_name_ = "CorrespondenceEstimation";
- }
-
- /** \brief Empty destructor */
- ~CorrespondenceEstimation () {}
-
- /** \brief Determine the correspondences between input and target cloud.
- * \param[out] correspondences the found correspondences (index of query point, index of target point, distance)
- * \param[in] max_distance maximum allowed distance between correspondences
- */
- void
- determineCorrespondences (pcl::Correspondences &correspondences,
- double max_distance = std::numeric_limits<double>::max ()) override;
-
- /** \brief Determine the reciprocal correspondences between input and target cloud.
- * A correspondence is considered reciprocal if both Src_i has Tgt_i as a
- * correspondence, and Tgt_i has Src_i as one.
- *
- * \param[out] correspondences the found correspondences (index of query and target point, distance)
- * \param[in] max_distance maximum allowed distance between correspondences
- */
- void
- determineReciprocalCorrespondences (pcl::Correspondences &correspondences,
- double max_distance = std::numeric_limits<double>::max ()) override;
-
-
- /** \brief Clone and cast to CorrespondenceEstimationBase */
- typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
- clone () const override
- {
- Ptr copy (new CorrespondenceEstimation<PointSource, PointTarget, Scalar> (*this));
- return (copy);
- }
- };
+ Ptr copy(new CorrespondenceEstimation<PointSource, PointTarget, Scalar>(*this));
+ return (copy);
}
-}
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_estimation.hpp>
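// Illustrative usage sketch (hypothetical function name and values): reusing a
// pre-built kd-tree via setSearchMethodTarget with force_no_recompute = true, so
// later calls to setInputTarget never trigger a tree rebuild, as documented above.
// Assumes <pcl/point_types.h> and the header above.
inline void
prebuiltTreeSketch(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                   const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target)
{
  pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>);
  tree->setInputCloud(target); // build the search structure once, up front

  pcl::registration::CorrespondenceEstimation<pcl::PointXYZ, pcl::PointXYZ> est;
  est.setSearchMethodTarget(tree, true); // never recomputed, per the flag's contract
  est.setInputSource(source);
  est.setInputTarget(target);

  pcl::Correspondences correspondences;
  est.determineCorrespondences(correspondences, 0.05); // max_distance in cloud units
}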
#pragma once
-#include <pcl/registration/correspondence_types.h>
#include <pcl/registration/correspondence_estimation.h>
+#include <pcl/registration/correspondence_types.h>
+
+namespace pcl {
+namespace registration {
+/** \brief @b CorrespondenceEstimationBackProjection computes
+ * correspondences as points in the target cloud which have minimum
+ * \author Suat Gedikli
+ * \ingroup registration
+ */
+template <typename PointSource,
+ typename PointTarget,
+ typename NormalT,
+ typename Scalar = float>
+class CorrespondenceEstimationBackProjection
+: public CorrespondenceEstimationBase<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<CorrespondenceEstimationBackProjection<PointSource,
+ PointTarget,
+ NormalT,
+ Scalar>>;
+ using ConstPtr = shared_ptr<const CorrespondenceEstimationBackProjection<PointSource,
+ PointTarget,
+ NormalT,
+ Scalar>>;
+
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ initComputeReciprocal;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ input_transformed_;
+ using PCLBase<PointSource>::deinitCompute;
+ using PCLBase<PointSource>::input_;
+ using PCLBase<PointSource>::indices_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getClassName;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ point_representation_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_indices_;
+
+ using KdTree = pcl::search::KdTree<PointTarget>;
+ using KdTreePtr = typename KdTree::Ptr;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using PointCloudNormals = pcl::PointCloud<NormalT>;
+ using NormalsPtr = typename PointCloudNormals::Ptr;
+ using NormalsConstPtr = typename PointCloudNormals::ConstPtr;
+
+ /** \brief Empty constructor.
+ *
+ * \note
+ * Sets the number of neighbors to be considered in the target point cloud (k_) to 10.
+ */
+ CorrespondenceEstimationBackProjection()
+ : source_normals_(), source_normals_transformed_(), target_normals_(), k_(10)
+ {
+ corr_name_ = "CorrespondenceEstimationBackProjection";
+ }
+
+ /** \brief Empty destructor */
+ virtual ~CorrespondenceEstimationBackProjection() {}
-namespace pcl
-{
- namespace registration
+ /** \brief Set the normals computed on the source point cloud
+ * \param[in] normals the normals computed for the source cloud
+ */
+ inline void
+ setSourceNormals(const NormalsConstPtr& normals)
{
- /** \brief @b CorrespondenceEstimationBackprojection computes
- * correspondences as points in the target cloud which have minimum
- * \author Suat Gedikli
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar = float>
- class CorrespondenceEstimationBackProjection : public CorrespondenceEstimationBase <PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar> >;
- using ConstPtr = shared_ptr<const CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar> >;
-
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initComputeReciprocal;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::input_transformed_;
- using PCLBase<PointSource>::deinitCompute;
- using PCLBase<PointSource>::input_;
- using PCLBase<PointSource>::indices_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getClassName;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::point_representation_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_indices_;
-
- using KdTree = pcl::search::KdTree<PointTarget>;
- using KdTreePtr = typename KdTree::Ptr;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using PointCloudNormals = pcl::PointCloud<NormalT>;
- using NormalsPtr = typename PointCloudNormals::Ptr;
- using NormalsConstPtr = typename PointCloudNormals::ConstPtr;
-
- /** \brief Empty constructor.
- *
- * \note
- * Sets the number of neighbors to be considered in the target point cloud (k_) to 10.
- */
- CorrespondenceEstimationBackProjection ()
- : source_normals_ ()
- , source_normals_transformed_ ()
- , target_normals_ ()
- , k_ (10)
- {
- corr_name_ = "CorrespondenceEstimationBackProjection";
- }
-
- /** \brief Empty destructor */
- virtual ~CorrespondenceEstimationBackProjection () {}
-
- /** \brief Set the normals computed on the source point cloud
- * \param[in] normals the normals computed for the source cloud
- */
- inline void
- setSourceNormals (const NormalsConstPtr &normals) { source_normals_ = normals; }
-
- /** \brief Get the normals of the source point cloud
- */
- inline NormalsConstPtr
- getSourceNormals () const { return (source_normals_); }
-
- /** \brief Set the normals computed on the target point cloud
- * \param[in] normals the normals computed for the target cloud
- */
- inline void
- setTargetNormals (const NormalsConstPtr &normals) { target_normals_ = normals; }
-
- /** \brief Get the normals of the target point cloud
- */
- inline NormalsConstPtr
- getTargetNormals () const { return (target_normals_); }
-
-
- /** \brief See if this rejector requires source normals */
- bool
- requiresSourceNormals () const
- { return (true); }
-
- /** \brief Blob method for setting the source normals */
- void
- setSourceNormals (pcl::PCLPointCloud2::ConstPtr cloud2)
- {
- NormalsPtr cloud (new PointCloudNormals);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setSourceNormals (cloud);
- }
-
- /** \brief See if this rejector requires target normals*/
- bool
- requiresTargetNormals () const
- { return (true); }
-
- /** \brief Method for setting the target normals */
- void
- setTargetNormals (pcl::PCLPointCloud2::ConstPtr cloud2)
- {
- NormalsPtr cloud (new PointCloudNormals);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setTargetNormals (cloud);
- }
-
- /** \brief Determine the correspondences between input and target cloud.
- * \param[out] correspondences the found correspondences (index of query point, index of target point, distance)
- * \param[in] max_distance maximum distance between the normal on the source point cloud and the corresponding point in the target
- * point cloud
- */
- void
- determineCorrespondences (pcl::Correspondences &correspondences,
- double max_distance = std::numeric_limits<double>::max ());
-
- /** \brief Determine the reciprocal correspondences between input and target cloud.
- * A correspondence is considered reciprocal if both Src_i has Tgt_i as a
- * correspondence, and Tgt_i has Src_i as one.
- *
- * \param[out] correspondences the found correspondences (index of query and target point, distance)
- * \param[in] max_distance maximum allowed distance between correspondences
- */
- virtual void
- determineReciprocalCorrespondences (pcl::Correspondences &correspondences,
- double max_distance = std::numeric_limits<double>::max ());
-
- /** \brief Set the number of nearest neighbours to be considered in the target
- * point cloud. By default, we use k = 10 nearest neighbors.
- *
- * \param[in] k the number of nearest neighbours to be considered
- */
- inline void
- setKSearch (unsigned int k) { k_ = k; }
-
- /** \brief Get the number of nearest neighbours considered in the target point
- * cloud for computing correspondences. By default we use k = 10 nearest
- * neighbors.
- */
- inline unsigned int
- getKSearch () const { return (k_); }
-
- /** \brief Clone and cast to CorrespondenceEstimationBase */
- virtual typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
- clone () const
- {
- Ptr copy (new CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar> (*this));
- return (copy);
- }
-
- protected:
-
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::corr_name_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_reciprocal_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
-
- /** \brief Internal computation initialization. */
- bool
- initCompute ();
-
- private:
-
- /** \brief The normals computed at each point in the source cloud */
- NormalsConstPtr source_normals_;
-
- /** \brief The normals computed at each point in the source cloud */
- NormalsPtr source_normals_transformed_;
-
- /** \brief The normals computed at each point in the target cloud */
- NormalsConstPtr target_normals_;
-
- /** \brief The number of neighbours to be considered in the target point cloud */
- unsigned int k_;
- };
+ source_normals_ = normals;
}
-}
+
+ /** \brief Get the normals of the source point cloud
+ */
+ inline NormalsConstPtr
+ getSourceNormals() const
+ {
+ return (source_normals_);
+ }
+
+ /** \brief Set the normals computed on the target point cloud
+ * \param[in] normals the normals computed for the target cloud
+ */
+ inline void
+ setTargetNormals(const NormalsConstPtr& normals)
+ {
+ target_normals_ = normals;
+ }
+
+ /** \brief Get the normals of the target point cloud
+ */
+ inline NormalsConstPtr
+ getTargetNormals() const
+ {
+ return (target_normals_);
+ }
+
+ /** \brief See if this rejector requires source normals */
+ bool
+ requiresSourceNormals() const
+ {
+ return (true);
+ }
+
+ /** \brief Blob method for setting the source normals */
+ void
+ setSourceNormals(pcl::PCLPointCloud2::ConstPtr cloud2)
+ {
+ NormalsPtr cloud(new PointCloudNormals);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setSourceNormals(cloud);
+ }
+
+ /** \brief See if this rejector requires target normals*/
+ bool
+ requiresTargetNormals() const
+ {
+ return (true);
+ }
+
+ /** \brief Method for setting the target normals */
+ void
+ setTargetNormals(pcl::PCLPointCloud2::ConstPtr cloud2)
+ {
+ NormalsPtr cloud(new PointCloudNormals);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setTargetNormals(cloud);
+ }
+
+ /** \brief Determine the correspondences between input and target cloud.
+ * \param[out] correspondences the found correspondences (index of query point,
+ * index of target point, distance)
+ * \param[in] max_distance maximum distance between the normal on the source point
+ * cloud and the corresponding point in the target point cloud
+ */
+ void
+ determineCorrespondences(pcl::Correspondences& correspondences,
+ double max_distance = std::numeric_limits<double>::max());
+
+ /** \brief Determine the reciprocal correspondences between input and target cloud.
+ * A correspondence is considered reciprocal if Src_i has Tgt_i as a
+ * correspondence and Tgt_i has Src_i as one.
+ *
+ * \param[out] correspondences the found correspondences (index of query and target
+ * point, distance)
+ * \param[in] max_distance maximum allowed distance between correspondences
+ */
+ virtual void
+ determineReciprocalCorrespondences(
+ pcl::Correspondences& correspondences,
+ double max_distance = std::numeric_limits<double>::max());
+
+ /** \brief Set the number of nearest neighbours to be considered in the target
+ * point cloud. By default, we use k = 10 nearest neighbors.
+ *
+ * \param[in] k the number of nearest neighbours to be considered
+ */
+ inline void
+ setKSearch(unsigned int k)
+ {
+ k_ = k;
+ }
+
+ /** \brief Get the number of nearest neighbours considered in the target point
+ * cloud for computing correspondences. By default we use k = 10 nearest
+ * neighbors.
+ */
+ inline unsigned int
+ getKSearch() const
+ {
+ return (k_);
+ }
+
+ /** \brief Clone and cast to CorrespondenceEstimationBase */
+ virtual typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
+ clone() const
+ {
+ Ptr copy(new CorrespondenceEstimationBackProjection<PointSource,
+ PointTarget,
+ NormalT,
+ Scalar>(*this));
+ return (copy);
+ }
+
+protected:
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::corr_name_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ tree_reciprocal_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
+
+ /** \brief Internal computation initialization. */
+ bool
+ initCompute();
+
+private:
+ /** \brief The normals computed at each point in the source cloud */
+ NormalsConstPtr source_normals_;
+
+ /** \brief The normals computed at each point in the source cloud */
+ NormalsPtr source_normals_transformed_;
+
+ /** \brief The normals computed at each point in the target cloud */
+ NormalsConstPtr target_normals_;
+
+ /** \brief The number of neighbours to be considered in the target point cloud */
+ unsigned int k_;
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_estimation_backprojection.hpp>
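// Illustrative usage sketch (hypothetical function name and clouds): back-projection
// needs normals on both clouds plus a neighborhood size k, the number of target
// neighbors considered per source point. Assumes the header above and
// <pcl/point_types.h>.
inline void
backProjectionSketch(const pcl::PointCloud<pcl::PointNormal>::ConstPtr& source,
                     const pcl::PointCloud<pcl::PointNormal>::ConstPtr& target)
{
  pcl::registration::CorrespondenceEstimationBackProjection<pcl::PointNormal,
                                                            pcl::PointNormal,
                                                            pcl::PointNormal>
      est;
  est.setInputSource(source);
  est.setSourceNormals(source); // PointNormal already carries the normals
  est.setInputTarget(target);
  est.setTargetNormals(target);
  est.setKSearch(10); // consider the 10 nearest target neighbors per source point

  pcl::Correspondences correspondences;
  est.determineCorrespondences(correspondences);
}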
#pragma once
-#include <pcl/registration/correspondence_types.h>
#include <pcl/registration/correspondence_estimation.h>
+#include <pcl/registration/correspondence_types.h>
+
+namespace pcl {
+namespace registration {
+/** \brief @b CorrespondenceEstimationNormalShooting computes
+ * correspondences as points in the target cloud which have minimum
+ * distance to normals computed on the input cloud
+ *
+ * Code example:
+ *
+ * \code
+ * pcl::PointCloud<pcl::PointNormal>::Ptr source, target;
+ * // ... read or fill in source and target
+ * pcl::CorrespondenceEstimationNormalShooting
+ * <pcl::PointNormal, pcl::PointNormal, pcl::PointNormal> est;
+ * est.setInputSource (source);
+ * est.setSourceNormals (source);
+ *
+ * est.setInputTarget (target);
+ *
+ * // Test the first 10 correspondences for each point in source, and return the best
+ * est.setKSearch (10);
+ *
+ * pcl::Correspondences all_correspondences;
+ * // Determine all correspondences
+ * est.determineCorrespondences (all_correspondences);
+ * \endcode
+ *
+ * \author Aravindhan K. Krishnan, Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSource,
+ typename PointTarget,
+ typename NormalT,
+ typename Scalar = float>
+class CorrespondenceEstimationNormalShooting
+: public CorrespondenceEstimationBase<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<CorrespondenceEstimationNormalShooting<PointSource,
+ PointTarget,
+ NormalT,
+ Scalar>>;
+ using ConstPtr = shared_ptr<const CorrespondenceEstimationNormalShooting<PointSource,
+ PointTarget,
+ NormalT,
+ Scalar>>;
+
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ initComputeReciprocal;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ input_transformed_;
+ using PCLBase<PointSource>::deinitCompute;
+ using PCLBase<PointSource>::input_;
+ using PCLBase<PointSource>::indices_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getClassName;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ point_representation_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_indices_;
+
+ using KdTree = pcl::search::KdTree<PointTarget>;
+ using KdTreePtr = typename KdTree::Ptr;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using PointCloudNormals = pcl::PointCloud<NormalT>;
+ using NormalsPtr = typename PointCloudNormals::Ptr;
+ using NormalsConstPtr = typename PointCloudNormals::ConstPtr;
+
+ /** \brief Empty constructor.
+ *
+ * \note
+ * Sets the number of neighbors to be considered in the target point cloud (k_) to 10.
+ */
+ CorrespondenceEstimationNormalShooting()
+ : source_normals_(), source_normals_transformed_(), k_(10)
+ {
+ corr_name_ = "CorrespondenceEstimationNormalShooting";
+ }
+
+ /** \brief Empty destructor */
+ ~CorrespondenceEstimationNormalShooting() {}
+
+ /** \brief Set the normals computed on the source point cloud
+ * \param[in] normals the normals computed for the source cloud
+ */
+ inline void
+ setSourceNormals(const NormalsConstPtr& normals)
+ {
+ source_normals_ = normals;
+ }
-namespace pcl
-{
- namespace registration
+ /** \brief Get the normals of the source point cloud
+ */
+ inline NormalsConstPtr
+ getSourceNormals() const
{
- /** \brief @b CorrespondenceEstimationNormalShooting computes
- * correspondences as points in the target cloud which have minimum
- * distance to normals computed on the input cloud
- *
- * Code example:
- *
- * \code
- * pcl::PointCloud<pcl::PointNormal>::Ptr source, target;
- * // ... read or fill in source and target
- * pcl::CorrespondenceEstimationNormalShooting<pcl::PointNormal, pcl::PointNormal, pcl::PointNormal> est;
- * est.setInputSource (source);
- * est.setSourceNormals (source);
- *
- * est.setInputTarget (target);
- *
- * // Test the first 10 correspondences for each point in source, and return the best
- * est.setKSearch (10);
- *
- * pcl::Correspondences all_correspondences;
- * // Determine all correspondences
- * est.determineCorrespondences (all_correspondences);
- * \endcode
- *
- * \author Aravindhan K. Krishnan, Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar = float>
- class CorrespondenceEstimationNormalShooting : public CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar> >;
- using ConstPtr = shared_ptr<const CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar> >;
-
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initComputeReciprocal;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::input_transformed_;
- using PCLBase<PointSource>::deinitCompute;
- using PCLBase<PointSource>::input_;
- using PCLBase<PointSource>::indices_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getClassName;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::point_representation_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_indices_;
-
- using KdTree = pcl::search::KdTree<PointTarget>;
- using KdTreePtr = typename KdTree::Ptr;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using PointCloudNormals = pcl::PointCloud<NormalT>;
- using NormalsPtr = typename PointCloudNormals::Ptr;
- using NormalsConstPtr = typename PointCloudNormals::ConstPtr;
-
- /** \brief Empty constructor.
- *
- * \note
- * Sets the number of neighbors to be considered in the target point cloud (k_) to 10.
- */
- CorrespondenceEstimationNormalShooting ()
- : source_normals_ ()
- , source_normals_transformed_ ()
- , k_ (10)
- {
- corr_name_ = "CorrespondenceEstimationNormalShooting";
- }
-
- /** \brief Empty destructor */
- ~CorrespondenceEstimationNormalShooting () {}
-
- /** \brief Set the normals computed on the source point cloud
- * \param[in] normals the normals computed for the source cloud
- */
- inline void
- setSourceNormals (const NormalsConstPtr &normals) { source_normals_ = normals; }
-
- /** \brief Get the normals of the source point cloud
- */
- inline NormalsConstPtr
- getSourceNormals () const { return (source_normals_); }
-
-
- /** \brief See if this rejector requires source normals */
- bool
- requiresSourceNormals () const override
- { return (true); }
-
- /** \brief Blob method for setting the source normals */
- void
- setSourceNormals (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- NormalsPtr cloud (new PointCloudNormals);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setSourceNormals (cloud);
- }
-
- /** \brief Determine the correspondences between input and target cloud.
- * \param[out] correspondences the found correspondences (index of query point, index of target point, distance)
- * \param[in] max_distance maximum distance between the normal on the source point cloud and the corresponding point in the target
- * point cloud
- */
- void
- determineCorrespondences (pcl::Correspondences &correspondences,
- double max_distance = std::numeric_limits<double>::max ()) override;
-
- /** \brief Determine the reciprocal correspondences between input and target cloud.
- * A correspondence is considered reciprocal if both Src_i has Tgt_i as a
- * correspondence, and Tgt_i has Src_i as one.
- *
- * \param[out] correspondences the found correspondences (index of query and target point, distance)
- * \param[in] max_distance maximum allowed distance between correspondences
- */
- void
- determineReciprocalCorrespondences (pcl::Correspondences &correspondences,
- double max_distance = std::numeric_limits<double>::max ()) override;
-
- /** \brief Set the number of nearest neighbours to be considered in the target
- * point cloud. By default, we use k = 10 nearest neighbors.
- *
- * \param[in] k the number of nearest neighbours to be considered
- */
- inline void
- setKSearch (unsigned int k) { k_ = k; }
-
- /** \brief Get the number of nearest neighbours considered in the target point
- * cloud for computing correspondences. By default we use k = 10 nearest
- * neighbors.
- */
- inline unsigned int
- getKSearch () const { return (k_); }
-
- /** \brief Clone and cast to CorrespondenceEstimationBase */
- typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
- clone () const override
- {
- Ptr copy (new CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar> (*this));
- return (copy);
- }
-
- protected:
-
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::corr_name_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_reciprocal_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
-
- /** \brief Internal computation initialization. */
- bool
- initCompute ();
-
- private:
-
- /** \brief The normals computed at each point in the source cloud */
- NormalsConstPtr source_normals_;
-
- /** \brief The normals computed at each point in the source cloud */
- NormalsPtr source_normals_transformed_;
-
- /** \brief The number of neighbours to be considered in the target point cloud */
- unsigned int k_;
- };
+ return (source_normals_);
}
-}
+
+  /** \brief See if this correspondence estimation method requires source normals */
+ bool
+ requiresSourceNormals() const override
+ {
+ return (true);
+ }
+
+ /** \brief Blob method for setting the source normals */
+ void
+ setSourceNormals(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ NormalsPtr cloud(new PointCloudNormals);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setSourceNormals(cloud);
+ }
+
+ /** \brief Determine the correspondences between input and target cloud.
+   * \param[out] correspondences the found correspondences (index of query point, index
+   * of target point, distance)
+   * \param[in] max_distance maximum distance between the normal on the source point
+   * cloud and the corresponding point in the target point cloud
+ */
+ void
+ determineCorrespondences(
+ pcl::Correspondences& correspondences,
+ double max_distance = std::numeric_limits<double>::max()) override;
+
+ /** \brief Determine the reciprocal correspondences between input and target cloud.
+   * A correspondence is considered reciprocal if Src_i has Tgt_i as a
+   * correspondence and Tgt_i has Src_i as one.
+   *
+   * \param[out] correspondences the found correspondences (index of query and target
+   * point, distance)
+   * \param[in] max_distance maximum allowed distance between correspondences
+ */
+ void
+ determineReciprocalCorrespondences(
+ pcl::Correspondences& correspondences,
+ double max_distance = std::numeric_limits<double>::max()) override;
+
+ /** \brief Set the number of nearest neighbours to be considered in the target
+ * point cloud. By default, we use k = 10 nearest neighbors.
+ *
+ * \param[in] k the number of nearest neighbours to be considered
+ */
+ inline void
+ setKSearch(unsigned int k)
+ {
+ k_ = k;
+ }
+
+ /** \brief Get the number of nearest neighbours considered in the target point
+ * cloud for computing correspondences. By default we use k = 10 nearest
+ * neighbors.
+ */
+ inline unsigned int
+ getKSearch() const
+ {
+ return (k_);
+ }
+
+ /** \brief Clone and cast to CorrespondenceEstimationBase */
+ typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
+ clone() const override
+ {
+ Ptr copy(new CorrespondenceEstimationNormalShooting<PointSource,
+ PointTarget,
+ NormalT,
+ Scalar>(*this));
+ return (copy);
+ }
+
+protected:
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::corr_name_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ tree_reciprocal_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
+
+ /** \brief Internal computation initialization. */
+ bool
+ initCompute();
+
+private:
+ /** \brief The normals computed at each point in the source cloud */
+ NormalsConstPtr source_normals_;
+
+  /** \brief The normals computed at each point in the transformed source cloud */
+ NormalsPtr source_normals_transformed_;
+
+ /** \brief The number of neighbours to be considered in the target point cloud */
+ unsigned int k_;
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_estimation_normal_shooting.hpp>
#pragma once
+#include <pcl/registration/correspondence_estimation.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/registration/correspondence_estimation.h>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** \brief CorrespondenceEstimationOrganizedProjection computes correspondences by
+ * projecting the source point cloud onto the target point cloud using the camera
+ * intrinsic and extrinsic parameters. The correspondences can be trimmed by a depth
+ * threshold and by a distance threshold. It is not as precise as a nearest neighbor
+ * search, but it is much faster, as it avoids the usage of any additional structures
+ * (i.e., kd-trees).
+ * \note The target point cloud must be organized (no restrictions on the source) and
+ * the target point cloud must be given in the camera coordinate frame. Any other
+ * transformation is specified by the src_to_tgt_transformation_ variable.
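+ *
+ * A minimal usage sketch (the cloud contents and the threshold values are
+ * placeholders; the intrinsics shown are simply the class defaults):
+ * \code
+ * pcl::PointCloud<pcl::PointXYZ>::Ptr source, target; // target must be organized
+ * // ... fill in source and target
+ * pcl::registration::CorrespondenceEstimationOrganizedProjection<pcl::PointXYZ, pcl::PointXYZ> est;
+ * est.setInputSource(source);
+ * est.setInputTarget(target);
+ * est.setFocalLengths(525.f, 525.f);
+ * est.setCameraCenters(319.5f, 239.5f);
+ * est.setDepthThreshold(0.1f);
+ * pcl::Correspondences correspondences;
+ * est.determineCorrespondences(correspondences, 0.05);
+ * \endcode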
+ * \author Alex Ichim
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class CorrespondenceEstimationOrganizedProjection
+: public CorrespondenceEstimationBase<PointSource, PointTarget, Scalar> {
+public:
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ input_transformed_;
+ using PCLBase<PointSource>::deinitCompute;
+ using PCLBase<PointSource>::input_;
+ using PCLBase<PointSource>::indices_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getClassName;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ point_representation_;
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::
+ target_cloud_updated_;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using Ptr = shared_ptr<
+ CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const CorrespondenceEstimationOrganizedProjection<PointSource,
+ PointTarget,
+ Scalar>>;
+
+ /** \brief Empty constructor that sets all the intrinsic calibration to the default
+ * Kinect values. */
+ CorrespondenceEstimationOrganizedProjection()
+ : fx_(525.f)
+ , fy_(525.f)
+ , cx_(319.5f)
+ , cy_(239.5f)
+ , src_to_tgt_transformation_(Eigen::Matrix4f::Identity())
+ , depth_threshold_(std::numeric_limits<float>::max())
+ , projection_matrix_(Eigen::Matrix3f::Identity())
+ {}
+
+ /** \brief Sets the focal length parameters of the target camera.
+ * \param[in] fx the focal length in pixels along the x-axis of the image
+ * \param[in] fy the focal length in pixels along the y-axis of the image
+ */
+ inline void
+ setFocalLengths(const float fx, const float fy)
+ {
+ fx_ = fx;
+ fy_ = fy;
+ }
+
+ /** \brief Reads back the focal length parameters of the target camera.
+ * \param[out] fx the focal length in pixels along the x-axis of the image
+ * \param[out] fy the focal length in pixels along the y-axis of the image
+ */
+ inline void
+ getFocalLengths(float& fx, float& fy) const
+ {
+ fx = fx_;
+ fy = fy_;
+ }
+
+ /** \brief Sets the camera center parameters of the target camera.
+ * \param[in] cx the x-coordinate of the camera center
+ * \param[in] cy the y-coordinate of the camera center
+ */
+ inline void
+ setCameraCenters(const float cx, const float cy)
+ {
+ cx_ = cx;
+ cy_ = cy;
+ }
+
+ /** \brief Reads back the camera center parameters of the target camera.
+ * \param[out] cx the x-coordinate of the camera center
+ * \param[out] cy the y-coordinate of the camera center
+ */
+ inline void
+ getCameraCenters(float& cx, float& cy) const
+ {
+ cx = cx_;
+ cy = cy_;
+ }
+
+ /** \brief Sets the transformation from the source point cloud to the target point
+   * cloud.
+   * \note The target point cloud must be in its local camera coordinates, so use
+   * this transformation to correct for that.
+   * \param[in] src_to_tgt_transformation the transformation
+ */
+ inline void
+ setSourceTransformation(const Eigen::Matrix4f& src_to_tgt_transformation)
+ {
+ src_to_tgt_transformation_ = src_to_tgt_transformation;
+ }
+
+ /** \brief Reads back the transformation from the source point cloud to the target
+   * point cloud.
+   * \note The target point cloud must be in its local camera coordinates, so use
+   * this transformation to correct for that.
+   * \return the transformation
+ */
+ inline Eigen::Matrix4f
+ getSourceTransformation() const
+ {
+ return (src_to_tgt_transformation_);
+ }
+
+ /** \brief Sets the depth threshold; after projecting the source points in the image
+ * space of the target camera, this threshold is applied on the depths of
+ * corresponding dexels to eliminate the ones that are too far from each other.
+ * \param[in] depth_threshold the depth threshold
+ */
+ inline void
+ setDepthThreshold(const float depth_threshold)
+ {
+ depth_threshold_ = depth_threshold;
+ }
+
+ /** \brief Reads back the depth threshold; after projecting the source points in the
+ * image space of the target camera, this threshold is applied on the depths of
+ * corresponding dexels to eliminate the ones that are too far from each other.
+ * \return the depth threshold
+ */
+ inline float
+ getDepthThreshold() const
{
- /** \brief CorrespondenceEstimationOrganizedProjection computes correspondences by projecting the source point cloud
- * onto the target point cloud using the camera intrinsic and extrinsic parameters. The correspondences can be trimmed
- * by a depth threshold and by a distance threshold.
- * It is not as precise as a nearest neighbor search, but it is much faster, as it avoids the usage of any additional
- * structures (i.e., kd-trees).
- * \note The target point cloud must be organized (no restrictions on the source) and the target point cloud must be
- * given in the camera coordinate frame. Any other transformation is specified by the src_to_tgt_transformation_
- * variable.
- * \author Alex Ichim
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class CorrespondenceEstimationOrganizedProjection : public CorrespondenceEstimationBase <PointSource, PointTarget, Scalar>
- {
- public:
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::input_transformed_;
- using PCLBase<PointSource>::deinitCompute;
- using PCLBase<PointSource>::input_;
- using PCLBase<PointSource>::indices_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getClassName;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::point_representation_;
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_cloud_updated_;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using Ptr = shared_ptr< CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr< const CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar> >;
-
-
-
- /** \brief Empty constructor that sets all the intrinsic calibration to the default Kinect values. */
- CorrespondenceEstimationOrganizedProjection ()
- : fx_ (525.f)
- , fy_ (525.f)
- , cx_ (319.5f)
- , cy_ (239.5f)
- , src_to_tgt_transformation_ (Eigen::Matrix4f::Identity ())
- , depth_threshold_ (std::numeric_limits<float>::max ())
- , projection_matrix_ (Eigen::Matrix3f::Identity ())
- { }
-
-
- /** \brief Sets the focal length parameters of the target camera.
- * \param[in] fx the focal length in pixels along the x-axis of the image
- * \param[in] fy the focal length in pixels along the y-axis of the image
- */
- inline void
- setFocalLengths (const float fx, const float fy)
- { fx_ = fx; fy_ = fy; }
-
- /** \brief Reads back the focal length parameters of the target camera.
- * \param[out] fx the focal length in pixels along the x-axis of the image
- * \param[out] fy the focal length in pixels along the y-axis of the image
- */
- inline void
- getFocalLengths (float &fx, float &fy) const
- { fx = fx_; fy = fy_; }
-
-
- /** \brief Sets the camera center parameters of the target camera.
- * \param[in] cx the x-coordinate of the camera center
- * \param[in] cy the y-coordinate of the camera center
- */
- inline void
- setCameraCenters (const float cx, const float cy)
- { cx_ = cx; cy_ = cy; }
-
- /** \brief Reads back the camera center parameters of the target camera.
- * \param[out] cx the x-coordinate of the camera center
- * \param[out] cy the y-coordinate of the camera center
- */
- inline void
- getCameraCenters (float &cx, float &cy) const
- { cx = cx_; cy = cy_; }
-
- /** \brief Sets the transformation from the source point cloud to the target point cloud.
- * \note The target point cloud must be in its local camera coordinates, so use this transformation to correct
- * for that.
- * \param[in] src_to_tgt_transformation the transformation
- */
- inline void
- setSourceTransformation (const Eigen::Matrix4f &src_to_tgt_transformation)
- { src_to_tgt_transformation_ = src_to_tgt_transformation; }
-
- /** \brief Reads back the transformation from the source point cloud to the target point cloud.
- * \note The target point cloud must be in its local camera coordinates, so use this transformation to correct
- * for that.
- * \return the transformation
- */
- inline Eigen::Matrix4f
- getSourceTransformation () const
- { return (src_to_tgt_transformation_); }
-
- /** \brief Sets the depth threshold; after projecting the source points in the image space of the target camera,
- * this threshold is applied on the depths of corresponding dexels to eliminate the ones that are too far from
- * each other.
- * \param[in] depth_threshold the depth threshold
- */
- inline void
- setDepthThreshold (const float depth_threshold)
- { depth_threshold_ = depth_threshold; }
-
- /** \brief Reads back the depth threshold; after projecting the source points in the image space of the target
- * camera, this threshold is applied on the depths of corresponding dexels to eliminate the ones that are too
- * far from each other.
- * \return the depth threshold
- */
- inline float
- getDepthThreshold () const
- { return (depth_threshold_); }
-
- /** \brief Computes the correspondences, applying a maximum Euclidean distance threshold.
- * \param correspondences
- * \param[in] max_distance Euclidean distance threshold above which correspondences will be rejected
- */
- void
- determineCorrespondences (Correspondences &correspondences, double max_distance);
-
- /** \brief Computes the correspondences, applying a maximum Euclidean distance threshold.
- * \param correspondences
- * \param[in] max_distance Euclidean distance threshold above which correspondences will be rejected
- */
- void
- determineReciprocalCorrespondences (Correspondences &correspondences, double max_distance);
-
- /** \brief Clone and cast to CorrespondenceEstimationBase */
- virtual typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
- clone () const
- {
- Ptr copy (new CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar> (*this));
- return (copy);
- }
-
- protected:
- using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
-
- bool
- initCompute ();
-
- float fx_, fy_;
- float cx_, cy_;
- Eigen::Matrix4f src_to_tgt_transformation_;
- float depth_threshold_;
-
- Eigen::Matrix3f projection_matrix_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ return (depth_threshold_);
}
-}
+
+ /** \brief Computes the correspondences, applying a maximum Euclidean distance
+ * threshold.
+ * \param[out] correspondences the found correspondences (index of query point, index
+ * of target point, distance)
+ * \param[in] max_distance Euclidean distance threshold above which correspondences
+ * will be rejected
+ */
+ void
+ determineCorrespondences(Correspondences& correspondences, double max_distance);
+
+ /** \brief Computes the correspondences, applying a maximum Euclidean distance
+ * threshold.
+ * \param[out] correspondences the found correspondences (index of query and target
+ * point, distance)
+ * \param[in] max_distance Euclidean distance threshold above which correspondences
+ * will be rejected
+ */
+ void
+ determineReciprocalCorrespondences(Correspondences& correspondences,
+ double max_distance);
+
+ /** \brief Clone and cast to CorrespondenceEstimationBase */
+ virtual typename CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::Ptr
+ clone() const
+ {
+ Ptr copy(new CorrespondenceEstimationOrganizedProjection<PointSource,
+ PointTarget,
+ Scalar>(*this));
+ return (copy);
+ }
+
+protected:
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
+
+ bool
+ initCompute();
+
+ float fx_, fy_;
+ float cx_, cy_;
+ Eigen::Matrix4f src_to_tgt_transformation_;
+ float depth_threshold_;
+
+ Eigen::Matrix3f projection_matrix_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_estimation_organized_projection.hpp>
#pragma once
-#include <pcl/registration/correspondence_types.h>
-#include <pcl/registration/correspondence_sorting.h>
#include <pcl/console/print.h>
-#include <pcl/common/transforms.h>
-#include <pcl/point_cloud.h>
+#include <pcl/registration/correspondence_sorting.h>
+#include <pcl/registration/correspondence_types.h>
#include <pcl/search/kdtree.h>
+#include <pcl/point_cloud.h>
+
+namespace pcl {
+namespace registration {
+/** @b CorrespondenceRejector represents the base class for correspondence rejection
+ * methods
+ * \author Dirk Holz
+ * \ingroup registration
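+ *
+ * A minimal sketch of the common usage pattern (the concrete rejector and the
+ * input correspondences are assumed to be created elsewhere):
+ * \code
+ * pcl::registration::CorrespondenceRejector::Ptr rejector; // any concrete rejector
+ * pcl::CorrespondencesPtr input(new pcl::Correspondences);
+ * // ... create a concrete rejector and fill input with estimated correspondences
+ * pcl::Correspondences remaining;
+ * // stateful usage: provide the input once, then apply the rejection
+ * rejector->setInputCorrespondences(input);
+ * rejector->getCorrespondences(remaining);
+ * // stateless alternative: pass the input correspondences directly
+ * rejector->getRemainingCorrespondences(*input, remaining);
+ * \endcode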
+ */
+class CorrespondenceRejector {
+public:
+ using Ptr = shared_ptr<CorrespondenceRejector>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejector>;
+
+ /** \brief Empty constructor. */
+ CorrespondenceRejector() {}
+
+ /** \brief Empty destructor. */
+ virtual ~CorrespondenceRejector() {}
+
+ /** \brief Provide a pointer to the vector of the input correspondences.
+ * \param[in] correspondences the const shared pointer to a correspondence vector
+ */
+ virtual inline void
+ setInputCorrespondences(const CorrespondencesConstPtr& correspondences)
+ {
+ input_correspondences_ = correspondences;
+ };
+
+ /** \brief Get a pointer to the vector of the input correspondences.
+ * \return correspondences the const shared pointer to a correspondence vector
+ */
+ inline CorrespondencesConstPtr
+ getInputCorrespondences()
+ {
+ return input_correspondences_;
+ };
+
+ /** \brief Run correspondence rejection
+ * \param[out] correspondences Vector of correspondences that have not been rejected.
+ */
+ inline void
+ getCorrespondences(pcl::Correspondences& correspondences)
+ {
+ if (!input_correspondences_ || (input_correspondences_->empty()))
+ return;
+
+ applyRejection(correspondences);
+ }
+
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences. Pure virtual. Compared to \a getCorrespondences this function
+ * is stateless, i.e., input correspondences do not need to be provided beforehand,
+ * but are directly provided in the function call.
+ * \param[in] original_correspondences the set of initial correspondences given
+ * \param[out] remaining_correspondences the resultant filtered set of remaining
+ * correspondences
+ */
+ virtual inline void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) = 0;
+
+ /** \brief Determine the indices of query points of
+ * correspondences that have been rejected, i.e., the difference
+ * between the input correspondences (set via \a setInputCorrespondences)
+ * and the given correspondence vector.
+ * \param[in] correspondences Vector of correspondences after rejection
+ * \param[out] indices Vector of query point indices of those correspondences
+ * that have been rejected.
+ */
+ inline void
+ getRejectedQueryIndices(const pcl::Correspondences& correspondences,
+ pcl::Indices& indices)
+ {
+ if (!input_correspondences_ || input_correspondences_->empty()) {
+ PCL_WARN("[pcl::registration::%s::getRejectedQueryIndices] Input correspondences "
+ "not set (lookup of rejected correspondences _not_ possible).\n",
+ getClassName().c_str());
+ return;
+ }
+
+ pcl::getRejectedQueryIndices(*input_correspondences_, correspondences, indices);
+ }
+
+ /** \brief Get a string representation of the name of this class. */
+ inline const std::string&
+ getClassName() const
+ {
+ return (rejection_name_);
+ }
+
+ /** \brief See if this rejector requires source points */
+ virtual bool
+ requiresSourcePoints() const
+ {
+ return (false);
+ }
+
+ /** \brief Abstract method for setting the source cloud */
+ virtual void setSourcePoints(pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
+ {
+ PCL_WARN("[pcl::registration::%s::setSourcePoints] This class does not require an "
+ "input source cloud\n",
+ getClassName().c_str());
+ }
+
+ /** \brief See if this rejector requires source normals */
+ virtual bool
+ requiresSourceNormals() const
+ {
+ return (false);
+ }
+
+ /** \brief Abstract method for setting the source normals */
+ virtual void setSourceNormals(pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
+ {
+ PCL_WARN("[pcl::registration::%s::setSourceNormals] This class does not require "
+ "input source normals\n",
+ getClassName().c_str());
+ }
+ /** \brief See if this rejector requires a target cloud */
+ virtual bool
+ requiresTargetPoints() const
+ {
+ return (false);
+ }
+
+ /** \brief Abstract method for setting the target cloud */
+ virtual void setTargetPoints(pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
+ {
+ PCL_WARN("[pcl::registration::%s::setTargetPoints] This class does not require an "
+ "input target cloud\n",
+ getClassName().c_str());
+ }
+
+ /** \brief See if this rejector requires target normals */
+ virtual bool
+ requiresTargetNormals() const
+ {
+ return (false);
+ }
+
+ /** \brief Abstract method for setting the target normals */
+ virtual void setTargetNormals(pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
+ {
+ PCL_WARN("[pcl::registration::%s::setTargetNormals] This class does not require "
+ "input target normals\n",
+ getClassName().c_str());
+ }
+
+protected:
+ /** \brief The name of the rejection method. */
+ std::string rejection_name_;
+
+ /** \brief The input correspondences. */
+ CorrespondencesConstPtr input_correspondences_;
+
+ /** \brief Abstract rejection method. */
+ virtual void
+ applyRejection(Correspondences& correspondences) = 0;
+};
+
+/** @b DataContainerInterface provides a generic interface for computing correspondence
+ * scores between correspondent points in the input and target clouds
+ * \ingroup registration
+ */
+class DataContainerInterface {
+public:
+ using Ptr = shared_ptr<DataContainerInterface>;
+ using ConstPtr = shared_ptr<const DataContainerInterface>;
+
+ virtual ~DataContainerInterface() = default;
+ virtual double
+ getCorrespondenceScore(int index) = 0;
+ virtual double
+ getCorrespondenceScore(const pcl::Correspondence&) = 0;
+ virtual double
+ getCorrespondenceScoreFromNormals(const pcl::Correspondence&) = 0;
+};
+
+/** @b DataContainer is a container for the input and target point clouds and implements
+ * the interface to compute correspondence scores between correspondent points in the
+ * input and target clouds
+ * \ingroup registration
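+ *
+ * A minimal scoring sketch (cloud contents and the correspondence indices are
+ * placeholders):
+ * \code
+ * pcl::PointCloud<pcl::PointXYZ>::Ptr source, target;
+ * // ... fill in source and target
+ * pcl::registration::DataContainer<pcl::PointXYZ> container;
+ * container.setInputSource(source);
+ * container.setInputTarget(target);
+ * pcl::Correspondence corr(0, 0, 0.f);
+ * // squared Euclidean distance between the corresponding pair of points
+ * double score = container.getCorrespondenceScore(corr);
+ * \endcode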
+ */
+template <typename PointT, typename NormalT = pcl::PointNormal>
+class DataContainer : public DataContainerInterface {
+ using PointCloud = pcl::PointCloud<PointT>;
+ using PointCloudPtr = typename PointCloud::Ptr;
+ using PointCloudConstPtr = typename PointCloud::ConstPtr;
+
+ using KdTreePtr = typename pcl::search::KdTree<PointT>::Ptr;
+
+ using Normals = pcl::PointCloud<NormalT>;
+ using NormalsPtr = typename Normals::Ptr;
+ using NormalsConstPtr = typename Normals::ConstPtr;
+
+public:
+ /** \brief Empty constructor. */
+ DataContainer(bool needs_normals = false)
+ : input_()
+ , input_transformed_()
+ , target_()
+ , input_normals_()
+ , input_normals_transformed_()
+ , target_normals_()
+ , tree_(new pcl::search::KdTree<PointT>)
+ , class_name_("DataContainer")
+ , needs_normals_(needs_normals)
+ , target_cloud_updated_(true)
+ , force_no_recompute_(false)
+ {}
+
+ /** \brief Empty destructor */
+ ~DataContainer() {}
+
+ /** \brief Provide a source point cloud dataset (must contain XYZ
+ * data!), used to compute the correspondence distance.
+ * \param[in] cloud a cloud containing XYZ data
+ */
+ inline void
+ setInputSource(const PointCloudConstPtr& cloud)
+ {
+ input_ = cloud;
+ }
+
+  /** \brief Get a pointer to the input source point cloud dataset. */
+ inline PointCloudConstPtr const
+ getInputSource()
+ {
+ return (input_);
+ }
+
+ /** \brief Provide a target point cloud dataset (must contain XYZ
+ * data!), used to compute the correspondence distance.
+ * \param[in] target a cloud containing XYZ data
+ */
+ inline void
+ setInputTarget(const PointCloudConstPtr& target)
+ {
+ target_ = target;
+ target_cloud_updated_ = true;
+ }
+
+ /** \brief Get a pointer to the input point cloud dataset target. */
+ inline PointCloudConstPtr const
+ getInputTarget()
+ {
+ return (target_);
+ }
+
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the target cloud.
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputTarget. Only use if you are
+ * confident that the tree will be set correctly.
+ */
+ inline void
+ setSearchMethodTarget(const KdTreePtr& tree, bool force_no_recompute = false)
+ {
+ tree_ = tree;
+ force_no_recompute_ = force_no_recompute;
+ target_cloud_updated_ = true;
+ }
+
+ /** \brief Set the normals computed on the input point cloud
+ * \param[in] normals the normals computed for the input cloud
+ */
+ inline void
+ setInputNormals(const NormalsConstPtr& normals)
+ {
+ input_normals_ = normals;
+ }
+
+ /** \brief Get the normals computed on the input point cloud */
+ inline NormalsConstPtr
+ getInputNormals()
+ {
+ return (input_normals_);
+ }
+
+ /** \brief Set the normals computed on the target point cloud
+   * \param[in] normals the normals computed for the target cloud
+ */
+ inline void
+ setTargetNormals(const NormalsConstPtr& normals)
+ {
+ target_normals_ = normals;
+ }
+
+ /** \brief Get the normals computed on the target point cloud */
+ inline NormalsConstPtr
+ getTargetNormals()
+ {
+ return (target_normals_);
+ }
+
+ /** \brief Get the correspondence score for a point in the input cloud
+ * \param[in] index index of the point in the input cloud
+ */
+ inline double
+ getCorrespondenceScore(int index) override
+ {
+ if (target_cloud_updated_ && !force_no_recompute_) {
+ tree_->setInputCloud(target_);
+ }
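+    // The score is the squared distance to the closest point in the target cloud
+    // (nearestKSearch reports squared Euclidean distances).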
+ pcl::Indices indices(1);
+ std::vector<float> distances(1);
+ if (tree_->nearestKSearch((*input_)[index], 1, indices, distances))
+ return (distances[0]);
+ return (std::numeric_limits<double>::max());
+ }
+
+ /** \brief Get the correspondence score for a given pair of correspondent points
+ * \param[in] corr Correspondent points
+ */
+ inline double
+ getCorrespondenceScore(const pcl::Correspondence& corr) override
+ {
+ // Get the source and the target feature from the list
+ const PointT& src = (*input_)[corr.index_query];
+ const PointT& tgt = (*target_)[corr.index_match];
+
+ return ((src.getVector4fMap() - tgt.getVector4fMap()).squaredNorm());
+ }
+
+ /** \brief Get the correspondence score for a given pair of correspondent points based
+   * on the angle between the normals. The normals for the input and target clouds
+   * must be set before using this function.
+   * \param[in] corr Correspondent points
+ */
+ inline double
+ getCorrespondenceScoreFromNormals(const pcl::Correspondence& corr) override
+ {
+ // assert ( (input_normals_->size () != 0) && (target_normals_->size () != 0) &&
+ // "Normals are not set for the input and target point clouds");
+ assert(input_normals_ && target_normals_ &&
+ "Normals are not set for the input and target point clouds");
+ const NormalT& src = (*input_normals_)[corr.index_query];
+ const NormalT& tgt = (*target_normals_)[corr.index_match];
+ return (double((src.normal[0] * tgt.normal[0]) + (src.normal[1] * tgt.normal[1]) +
+ (src.normal[2] * tgt.normal[2])));
+ }
+
+private:
+ /** \brief The input point cloud dataset */
+ PointCloudConstPtr input_;
+
+ /** \brief The input transformed point cloud dataset */
+ PointCloudPtr input_transformed_;
+
+  /** \brief The target point cloud dataset. */
+ PointCloudConstPtr target_;
+
+ /** \brief Normals to the input point cloud */
+ NormalsConstPtr input_normals_;
+
+  /** \brief Normals to the transformed input point cloud */
+ NormalsPtr input_normals_transformed_;
+
+ /** \brief Normals to the target point cloud */
+ NormalsConstPtr target_normals_;
+
+ /** \brief A pointer to the spatial search object. */
+ KdTreePtr tree_;
+
+ /** \brief The name of the rejection method. */
+ std::string class_name_;
+
+ /** \brief Should the current data container use normals? */
+ bool needs_normals_;
+
+ /** \brief Variable that stores whether we have a new target cloud, meaning we need to
+ * pre-process it again. This way, we avoid rebuilding the kd-tree */
+ bool target_cloud_updated_;
+
+ /** \brief A flag which, if set, means the tree operating on the target cloud
+ * will never be recomputed*/
+ bool force_no_recompute_;
-namespace pcl
-{
- namespace registration
+ /** \brief Get a string representation of the name of this class. */
+ inline const std::string&
+ getClassName() const
{
- /** @b CorrespondenceRejector represents the base class for correspondence rejection methods
- * \author Dirk Holz
- * \ingroup registration
- */
- class CorrespondenceRejector
- {
- public:
- using Ptr = shared_ptr<CorrespondenceRejector>;
- using ConstPtr = shared_ptr<const CorrespondenceRejector>;
-
- /** \brief Empty constructor. */
- CorrespondenceRejector ()
- {}
-
- /** \brief Empty destructor. */
- virtual ~CorrespondenceRejector () {}
-
- /** \brief Provide a pointer to the vector of the input correspondences.
- * \param[in] correspondences the const shared pointer to a correspondence vector
- */
- virtual inline void
- setInputCorrespondences (const CorrespondencesConstPtr &correspondences)
- {
- input_correspondences_ = correspondences;
- };
-
- /** \brief Get a pointer to the vector of the input correspondences.
- * \return correspondences the const shared pointer to a correspondence vector
- */
- inline CorrespondencesConstPtr
- getInputCorrespondences () { return input_correspondences_; };
-
- /** \brief Run correspondence rejection
- * \param[out] correspondences Vector of correspondences that have not been rejected.
- */
- inline void
- getCorrespondences (pcl::Correspondences &correspondences)
- {
- if (!input_correspondences_ || (input_correspondences_->empty ()))
- return;
-
- applyRejection (correspondences);
- }
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * Pure virtual. Compared to \a getCorrespondences this function is
- * stateless, i.e., input correspondences do not need to be provided beforehand,
- * but are directly provided in the function call.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- virtual inline void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) = 0;
-
- /** \brief Determine the indices of query points of
- * correspondences that have been rejected, i.e., the difference
- * between the input correspondences (set via \a setInputCorrespondences)
- * and the given correspondence vector.
- * \param[in] correspondences Vector of correspondences after rejection
- * \param[out] indices Vector of query point indices of those correspondences
- * that have been rejected.
- */
- inline void
- getRejectedQueryIndices (const pcl::Correspondences &correspondences,
- std::vector<int>& indices)
- {
- if (!input_correspondences_ || input_correspondences_->empty ())
- {
- PCL_WARN ("[pcl::registration::%s::getRejectedQueryIndices] Input correspondences not set (lookup of rejected correspondences _not_ possible).\n", getClassName ().c_str ());
- return;
- }
-
- pcl::getRejectedQueryIndices(*input_correspondences_, correspondences, indices);
- }
-
- /** \brief Get a string representation of the name of this class. */
- inline const std::string&
- getClassName () const { return (rejection_name_); }
-
-
- /** \brief See if this rejector requires source points */
- virtual bool
- requiresSourcePoints () const
- { return (false); }
-
- /** \brief Abstract method for setting the source cloud */
- virtual void
- setSourcePoints (pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
- {
- PCL_WARN ("[pcl::registration::%s::setSourcePoints] This class does not require an input source cloud", getClassName ().c_str ());
- }
-
- /** \brief See if this rejector requires source normals */
- virtual bool
- requiresSourceNormals () const
- { return (false); }
-
- /** \brief Abstract method for setting the source normals */
- virtual void
- setSourceNormals (pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
- {
- PCL_WARN ("[pcl::registration::%s::setSourceNormals] This class does not require input source normals", getClassName ().c_str ());
- }
- /** \brief See if this rejector requires a target cloud */
- virtual bool
- requiresTargetPoints () const
- { return (false); }
-
- /** \brief Abstract method for setting the target cloud */
- virtual void
- setTargetPoints (pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
- {
- PCL_WARN ("[pcl::registration::%s::setTargetPoints] This class does not require an input target cloud", getClassName ().c_str ());
- }
-
- /** \brief See if this rejector requires target normals */
- virtual bool
- requiresTargetNormals () const
- { return (false); }
-
- /** \brief Abstract method for setting the target normals */
- virtual void
- setTargetNormals (pcl::PCLPointCloud2::ConstPtr /*cloud2*/)
- {
- PCL_WARN ("[pcl::registration::%s::setTargetNormals] This class does not require input target normals", getClassName ().c_str ());
- }
-
- protected:
-
- /** \brief The name of the rejection method. */
- std::string rejection_name_;
-
- /** \brief The input correspondences. */
- CorrespondencesConstPtr input_correspondences_;
-
- /** \brief Abstract rejection method. */
- virtual void
- applyRejection (Correspondences &correspondences) = 0;
- };
-
- /** @b DataContainerInterface provides a generic interface for computing correspondence scores between correspondent
- * points in the input and target clouds
- * \ingroup registration
- */
- class DataContainerInterface
- {
- public:
- using Ptr = shared_ptr<DataContainerInterface>;
- using ConstPtr = shared_ptr<const DataContainerInterface>;
-
- virtual ~DataContainerInterface () = default;
- virtual double getCorrespondenceScore (int index) = 0;
- virtual double getCorrespondenceScore (const pcl::Correspondence &) = 0;
- virtual double getCorrespondenceScoreFromNormals (const pcl::Correspondence &) = 0;
- };
-
- /** @b DataContainer is a container for the input and target point clouds and implements the interface
- * to compute correspondence scores between correspondent points in the input and target clouds
- * \ingroup registration
- */
- template <typename PointT, typename NormalT = pcl::PointNormal>
- class DataContainer : public DataContainerInterface
- {
- using PointCloud = pcl::PointCloud<PointT>;
- using PointCloudPtr = typename PointCloud::Ptr;
- using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
- using KdTreePtr = typename pcl::search::KdTree<PointT>::Ptr;
-
- using Normals = pcl::PointCloud<NormalT>;
- using NormalsPtr = typename Normals::Ptr;
- using NormalsConstPtr = typename Normals::ConstPtr;
-
- public:
-
- /** \brief Empty constructor. */
- DataContainer (bool needs_normals = false)
- : input_ ()
- , input_transformed_ ()
- , target_ ()
- , input_normals_ ()
- , input_normals_transformed_ ()
- , target_normals_ ()
- , tree_ (new pcl::search::KdTree<PointT>)
- , class_name_ ("DataContainer")
- , needs_normals_ (needs_normals)
- , target_cloud_updated_ (true)
- , force_no_recompute_ (false)
- {
- }
-
- /** \brief Empty destructor */
- ~DataContainer () {}
-
- /** \brief Provide a source point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- inline void
- setInputSource (const PointCloudConstPtr &cloud)
- {
- input_ = cloud;
- }
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- inline PointCloudConstPtr const
- getInputSource () { return (input_); }
-
- /** \brief Provide a target point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] target a cloud containing XYZ data
- */
- inline void
- setInputTarget (const PointCloudConstPtr &target)
- {
- target_ = target;
- target_cloud_updated_ = true;
- }
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- inline PointCloudConstPtr const
- getInputTarget () { return (target_); }
-
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the target cloud.
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputTarget. Only use if you are
- * confident that the tree will be set correctly.
- */
- inline void
- setSearchMethodTarget (const KdTreePtr &tree,
- bool force_no_recompute = false)
- {
- tree_ = tree;
- if (force_no_recompute)
- {
- force_no_recompute_ = true;
- }
- target_cloud_updated_ = true;
- }
-
- /** \brief Set the normals computed on the input point cloud
- * \param[in] normals the normals computed for the input cloud
- */
- inline void
- setInputNormals (const NormalsConstPtr &normals) { input_normals_ = normals; }
-
- /** \brief Get the normals computed on the input point cloud */
- inline NormalsConstPtr
- getInputNormals () { return (input_normals_); }
-
- /** \brief Set the normals computed on the target point cloud
- * \param[in] normals the normals computed for the input cloud
- */
- inline void
- setTargetNormals (const NormalsConstPtr &normals) { target_normals_ = normals; }
-
- /** \brief Get the normals computed on the target point cloud */
- inline NormalsConstPtr
- getTargetNormals () { return (target_normals_); }
-
- /** \brief Get the correspondence score for a point in the input cloud
- * \param[in] index index of the point in the input cloud
- */
- inline double
- getCorrespondenceScore (int index) override
- {
- if ( target_cloud_updated_ && !force_no_recompute_ )
- {
- tree_->setInputCloud (target_);
- }
- std::vector<int> indices (1);
- std::vector<float> distances (1);
- if (tree_->nearestKSearch ((*input_)[index], 1, indices, distances))
- return (distances[0]);
- return (std::numeric_limits<double>::max ());
- }
-
- /** \brief Get the correspondence score for a given pair of correspondent points
- * \param[in] corr Correspondent points
- */
- inline double
- getCorrespondenceScore (const pcl::Correspondence &corr) override
- {
- // Get the source and the target feature from the list
- const PointT &src = (*input_)[corr.index_query];
- const PointT &tgt = (*target_)[corr.index_match];
-
- return ((src.getVector4fMap () - tgt.getVector4fMap ()).squaredNorm ());
- }
-
- /** \brief Get the correspondence score for a given pair of correspondent points based on
- * the angle between the normals. The normmals for the in put and target clouds must be
- * set before using this function
- * \param[in] corr Correspondent points
- */
- inline double
- getCorrespondenceScoreFromNormals (const pcl::Correspondence &corr) override
- {
- //assert ( (input_normals_->size () != 0) && (target_normals_->size () != 0) && "Normals are not set for the input and target point clouds");
- assert (input_normals_ && target_normals_ && "Normals are not set for the input and target point clouds");
- const NormalT &src = (*input_normals_)[corr.index_query];
- const NormalT &tgt = (*target_normals_)[corr.index_match];
- return (double ((src.normal[0] * tgt.normal[0]) + (src.normal[1] * tgt.normal[1]) + (src.normal[2] * tgt.normal[2])));
- }
-
- private:
- /** \brief The input point cloud dataset */
- PointCloudConstPtr input_;
-
- /** \brief The input transformed point cloud dataset */
- PointCloudPtr input_transformed_;
-
- /** \brief The target point cloud datase. */
- PointCloudConstPtr target_;
-
- /** \brief Normals to the input point cloud */
- NormalsConstPtr input_normals_;
-
- /** \brief Normals to the input point cloud */
- NormalsPtr input_normals_transformed_;
-
- /** \brief Normals to the target point cloud */
- NormalsConstPtr target_normals_;
-
- /** \brief A pointer to the spatial search object. */
- KdTreePtr tree_;
-
- /** \brief The name of the rejection method. */
- std::string class_name_;
-
- /** \brief Should the current data container use normals? */
- bool needs_normals_;
-
- /** \brief Variable that stores whether we have a new target cloud, meaning we need to pre-process it again.
- * This way, we avoid rebuilding the kd-tree */
- bool target_cloud_updated_;
-
- /** \brief A flag which, if set, means the tree operating on the target cloud
- * will never be recomputed*/
- bool force_no_recompute_;
-
-
-
- /** \brief Get a string representation of the name of this class. */
- inline const std::string&
- getClassName () const { return (class_name_); }
- };
+ return (class_name_);
}
-}
+};
+} // namespace registration
+} // namespace pcl
#pragma once
#include <pcl/registration/correspondence_rejection.h>
-#include <pcl/memory.h> // for static_pointer_cast
+#include <pcl/conversions.h> // for fromPCLPointCloud2
+#include <pcl/memory.h> // for static_pointer_cast
+
+namespace pcl {
+namespace registration {
+/**
+ * @b CorrespondenceRejectorDistance implements a simple correspondence
+ * rejection method based on thresholding the distances between the
+ * correspondences.
+ *
+ * \note If \ref setInputSource and \ref setInputTarget are given, then the
+ * distances between correspondences will be estimated using the given XYZ
+ * data, and not read from the set of input correspondences.
+ *
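+ * A minimal usage sketch (the cloud contents and the 0.05 m threshold are
+ * placeholders):
+ * \code
+ * pcl::PointCloud<pcl::PointXYZ>::Ptr source, target;
+ * pcl::CorrespondencesPtr correspondences(new pcl::Correspondences);
+ * // ... fill the clouds and estimate initial correspondences
+ * pcl::registration::CorrespondenceRejectorDistance rejector;
+ * rejector.setInputSource<pcl::PointXYZ>(source);
+ * rejector.setInputTarget<pcl::PointXYZ>(target);
+ * rejector.setInputCorrespondences(correspondences);
+ * rejector.setMaximumDistance(0.05f); // reject pairs further apart than 5 cm
+ * pcl::Correspondences remaining;
+ * rejector.getCorrespondences(remaining);
+ * \endcode
+ *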
+ * \author Dirk Holz, Radu B. Rusu
+ * \ingroup registration
+ */
+class PCL_EXPORTS CorrespondenceRejectorDistance : public CorrespondenceRejector {
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
+
+public:
+ using Ptr = shared_ptr<CorrespondenceRejectorDistance>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorDistance>;
+
+ /** \brief Empty constructor. */
+ CorrespondenceRejectorDistance() : max_distance_(std::numeric_limits<float>::max())
+ {
+ rejection_name_ = "CorrespondenceRejectorDistance";
+ }
+
+ /** \brief Empty destructor */
+ ~CorrespondenceRejectorDistance() {}
+
+ /** \brief Get a list of valid correspondences after rejection from the original set
+   * of correspondences.
+   * \param[in] original_correspondences the set of initial correspondences given
+   * \param[out] remaining_correspondences the resultant filtered set of remaining
+   * correspondences
+ */
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+
+ /** \brief Set the maximum distance used for thresholding in correspondence rejection.
+ * \param[in] distance Distance to be used as maximum distance between
+   * correspondences. Correspondences with larger distances are rejected.
+   * \note Internally, the distance will be stored squared.
+ */
+ virtual inline void
+ setMaximumDistance(float distance)
+ {
+ max_distance_ = distance * distance;
+ };
+
+ /** \brief Get the maximum distance used for thresholding in correspondence rejection.
+ */
+ inline float
+ getMaximumDistance() const
+ {
+ return std::sqrt(max_distance_);
+ };
+
+ /** \brief Provide a source point cloud dataset (must contain XYZ
+ * data!), used to compute the correspondence distance.
+ * \param[in] cloud a cloud containing XYZ data
+ */
+ template <typename PointT>
+ inline void
+ setInputSource(const typename pcl::PointCloud<PointT>::ConstPtr& cloud)
+ {
+ if (!data_container_)
+ data_container_.reset(new DataContainer<PointT>);
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->setInputSource(cloud);
+ }
+
+ /** \brief Provide a target point cloud dataset (must contain XYZ
+ * data!), used to compute the correspondence distance.
+ * \param[in] target a cloud containing XYZ data
+ */
+ template <typename PointT>
+ inline void
+ setInputTarget(const typename pcl::PointCloud<PointT>::ConstPtr& target)
+ {
+ if (!data_container_)
+ data_container_.reset(new DataContainer<PointT>);
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->setInputTarget(target);
+ }
+ /** \brief See if this rejector requires source points */
+ bool
+ requiresSourcePoints() const override
+ {
+ return (true);
+ }
+
+ /** \brief Blob method for setting the source cloud */
+ void
+ setSourcePoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputSource<PointXYZ>(cloud);
+ }
+
+ /** \brief See if this rejector requires a target cloud */
+ bool
+ requiresTargetPoints() const override
+ {
+ return (true);
+ }
-namespace pcl
-{
- namespace registration
+ /** \brief Method for setting the target cloud */
+ void
+ setTargetPoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
{
- /**
- * @b CorrespondenceRejectorDistance implements a simple correspondence
- * rejection method based on thresholding the distances between the
- * correspondences.
- *
- * \note If \ref setInputCloud and \ref setInputTarget are given, then the
- * distances between correspondences will be estimated using the given XYZ
- * data, and not read from the set of input correspondences.
- *
- * \author Dirk Holz, Radu B. Rusu
- * \ingroup registration
- */
- class PCL_EXPORTS CorrespondenceRejectorDistance: public CorrespondenceRejector
- {
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- public:
- using Ptr = shared_ptr<CorrespondenceRejectorDistance>;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorDistance>;
-
- /** \brief Empty constructor. */
- CorrespondenceRejectorDistance () : max_distance_(std::numeric_limits<float>::max ())
- {
- rejection_name_ = "CorrespondenceRejectorDistance";
- }
-
- /** \brief Empty destructor */
- ~CorrespondenceRejectorDistance () {}
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
-
- /** \brief Set the maximum distance used for thresholding in correspondence rejection.
- * \param[in] distance Distance to be used as maximum distance between correspondences.
- * Correspondences with larger distances are rejected.
- * \note Internally, the distance will be stored squared.
- */
- virtual inline void
- setMaximumDistance (float distance) { max_distance_ = distance * distance; };
-
- /** \brief Get the maximum distance used for thresholding in correspondence rejection. */
- inline float
- getMaximumDistance () const { return std::sqrt (max_distance_); };
-
- /** \brief Provide a source point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "pcl::registration::CorrespondenceRejectorDistance::setInputCloud is deprecated. Please use setInputSource instead")
- inline void
- setInputCloud (const typename pcl::PointCloud<PointT>::ConstPtr &cloud)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputSource (cloud);
- }
-
- /** \brief Provide a source point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- template <typename PointT> inline void
- setInputSource (const typename pcl::PointCloud<PointT>::ConstPtr &cloud)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputSource (cloud);
- }
-
- /** \brief Provide a target point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] target a cloud containing XYZ data
- */
- template <typename PointT> inline void
- setInputTarget (const typename pcl::PointCloud<PointT>::ConstPtr &target)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputTarget (target);
- }
-
-
- /** \brief See if this rejector requires source points */
- bool
- requiresSourcePoints () const override
- { return (true); }
-
- /** \brief Blob method for setting the source cloud */
- void
- setSourcePoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputSource<PointXYZ> (cloud);
- }
-
- /** \brief See if this rejector requires a target cloud */
- bool
- requiresTargetPoints () const override
- { return (true); }
-
- /** \brief Method for setting the target cloud */
- void
- setTargetPoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputTarget<PointXYZ> (cloud);
- }
-
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the target cloud.
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputTarget. Only use if you are
- * confident that the tree will be set correctly.
- */
- template <typename PointT> inline void
- setSearchMethodTarget (const typename pcl::search::KdTree<PointT>::Ptr &tree,
- bool force_no_recompute = false)
- {
- static_pointer_cast< DataContainer<PointT> >
- (data_container_)->setSearchMethodTarget (tree, force_no_recompute );
- }
-
-
- protected:
-
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
-
- /** \brief The maximum distance threshold between two correspondent points in source <-> target. If the
- * distance is larger than this threshold, the points will not be ignored in the alignment process.
- */
- float max_distance_;
-
- using DataContainerPtr = DataContainerInterface::Ptr;
-
- /** \brief A pointer to the DataContainer object containing the input and target point clouds */
- DataContainerPtr data_container_;
- };
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputTarget<PointXYZ>(cloud);
+ }
+
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the target cloud.
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputTarget. Only use if you are
+ * confident that the tree will be set correctly.
+ */
+ template <typename PointT>
+ inline void
+ setSearchMethodTarget(const typename pcl::search::KdTree<PointT>::Ptr& tree,
+ bool force_no_recompute = false)
+ {
+ static_pointer_cast<DataContainer<PointT>>(data_container_)
+ ->setSearchMethodTarget(tree, force_no_recompute);
+ }
+
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
}
-}
-#include <pcl/registration/impl/correspondence_rejection_distance.hpp>
+ /** \brief The maximum distance threshold between two correspondent points in source
+   * <-> target. If the distance is larger than this threshold, the points will be
+   * ignored in the alignment process.
+ */
+ float max_distance_;
+
+ using DataContainerPtr = DataContainerInterface::Ptr;
+
+ /** \brief A pointer to the DataContainer object containing the input and target point
+ * clouds */
+ DataContainerPtr data_container_;
+};
+
+} // namespace registration
+} // namespace pcl
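For context, a minimal usage sketch of the distance rejector whose internals are reformatted above (not part of the change). The class name CorrespondenceRejectorDistance and the setMaximumDistance() setter are taken from the existing public header this hunk belongs to, not shown in the diff; the 5 cm threshold is illustrative only.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/correspondence.h>
#include <pcl/registration/correspondence_rejection_distance.h>

void rejectByDistance(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& src,
                      const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& tgt,
                      const pcl::Correspondences& all,
                      pcl::Correspondences& good)
{
  pcl::registration::CorrespondenceRejectorDistance rej;
  rej.setInputSource<pcl::PointXYZ>(src); // XYZ data used to recompute distances
  rej.setInputTarget<pcl::PointXYZ>(tgt);
  rej.setMaximumDistance(0.05f);          // assumed setter for max_distance_ (metres)
  rej.getRemainingCorrespondences(all, good);
}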
#include <unordered_map>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** \brief CorrespondenceRejectorFeatures implements a correspondence rejection method
+ * based on a set of feature descriptors. Given an input feature space, the method
+ * checks if each feature in the source cloud has a correspondence in the target cloud,
+ * either by checking the first K (given) point correspondences, or by defining a
+ * tolerance threshold via a radius in feature space.
+ * \todo explain this better.
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+class PCL_EXPORTS CorrespondenceRejectorFeatures : public CorrespondenceRejector {
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
+
+public:
+ using Ptr = shared_ptr<CorrespondenceRejectorFeatures>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorFeatures>;
+
+ /** \brief Empty constructor. */
+ CorrespondenceRejectorFeatures() : max_distance_(std::numeric_limits<float>::max())
{
- /** \brief CorrespondenceRejectorFeatures implements a correspondence rejection method based on a set of feature
- * descriptors. Given an input feature space, the method checks if each feature in the source cloud has a
- * correspondence in the target cloud, either by checking the first K (given) point correspondences, or
- * by defining a tolerance threshold via a radius in feature space.
- * \todo explain this better.
- * \author Radu B. Rusu
- * \ingroup registration
- */
- class PCL_EXPORTS CorrespondenceRejectorFeatures: public CorrespondenceRejector
- {
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- public:
- using Ptr = shared_ptr<CorrespondenceRejectorFeatures>;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorFeatures>;
-
- /** \brief Empty constructor. */
- CorrespondenceRejectorFeatures () : max_distance_ (std::numeric_limits<float>::max ())
- {
- rejection_name_ = "CorrespondenceRejectorFeatures";
- }
-
- /** \brief Empty destructor. */
- ~CorrespondenceRejectorFeatures () {}
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
-
- /** \brief Provide a pointer to a cloud of feature descriptors associated with the source point cloud
- * \param[in] source_feature a cloud of feature descriptors associated with the source point cloud
- * \param[in] key a string that uniquely identifies the feature
- */
- template <typename FeatureT> inline void
- setSourceFeature (const typename pcl::PointCloud<FeatureT>::ConstPtr &source_feature,
- const std::string &key);
-
- /** \brief Get a pointer to the source cloud's feature descriptors, specified by the given \a key
- * \param[in] key a string that uniquely identifies the feature (must match the key provided by setSourceFeature)
- */
- template <typename FeatureT> inline typename pcl::PointCloud<FeatureT>::ConstPtr
- getSourceFeature (const std::string &key);
-
- /** \brief Provide a pointer to a cloud of feature descriptors associated with the target point cloud
- * \param[in] target_feature a cloud of feature descriptors associated with the target point cloud
- * \param[in] key a string that uniquely identifies the feature
- */
- template <typename FeatureT> inline void
- setTargetFeature (const typename pcl::PointCloud<FeatureT>::ConstPtr &target_feature,
- const std::string &key);
-
- /** \brief Get a pointer to the source cloud's feature descriptors, specified by the given \a key
- * \param[in] key a string that uniquely identifies the feature (must match the key provided by setTargetFeature)
- */
- template <typename FeatureT> inline typename pcl::PointCloud<FeatureT>::ConstPtr
- getTargetFeature (const std::string &key);
-
- /** \brief Set a hard distance threshold in the feature \a FeatureT space, between source and target
- * features. Any feature correspondence that is above this threshold will be considered bad and will be
- * filtered out.
- * \param[in] thresh the distance threshold
- * \param[in] key a string that uniquely identifies the feature
- */
- template <typename FeatureT> inline void
- setDistanceThreshold (double thresh, const std::string &key);
-
- /** \brief Test that all features are valid (i.e., does each key have a valid source cloud, target cloud,
- * and search method)
- */
- inline bool
- hasValidFeatures ();
-
- /** \brief Provide a boost shared pointer to a PointRepresentation to be used when comparing features
- * \param[in] key a string that uniquely identifies the feature
- * \param[in] fr the point feature representation to be used
- */
- template <typename FeatureT> inline void
- setFeatureRepresentation (const typename pcl::PointRepresentation<FeatureT>::ConstPtr &fr,
- const std::string &key);
-
- protected:
-
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
-
- /** \brief The maximum distance threshold between two correspondent points in source <-> target. If the
- * distance is larger than this threshold, the points will not be ignored in the alignment process.
- */
- float max_distance_;
-
- class FeatureContainerInterface
- {
- public:
- /** \brief Empty destructor */
- virtual ~FeatureContainerInterface () = default;
- virtual bool isValid () = 0;
- virtual double getCorrespondenceScore (int index) = 0;
- virtual bool isCorrespondenceValid (int index) = 0;
-
- using Ptr = shared_ptr<FeatureContainerInterface>;
- };
-
- using FeaturesMap = std::unordered_map<std::string, FeatureContainerInterface::Ptr>;
-
- /** \brief An STL map containing features to use when performing the correspondence search.*/
- FeaturesMap features_map_;
-
- /** \brief An inner class containing pointers to the source and target feature clouds
- * and the parameters needed to perform the correspondence search. This class extends
- * FeatureContainerInterface, which contains abstract methods for any methods that do not depend on the
- * FeatureT --- these methods can thus be called from a pointer to FeatureContainerInterface without
- * casting to the derived class.
- */
- template <typename FeatureT>
- class FeatureContainer : public pcl::registration::CorrespondenceRejectorFeatures::FeatureContainerInterface
- {
- public:
- using FeatureCloudConstPtr = typename pcl::PointCloud<FeatureT>::ConstPtr;
- using SearchMethod = std::function<int (const pcl::PointCloud<FeatureT> &, int, std::vector<int> &, std::vector<float> &)>;
-
- using PointRepresentationConstPtr = typename pcl::PointRepresentation<FeatureT>::ConstPtr;
-
- FeatureContainer () : thresh_(std::numeric_limits<double>::max ()), feature_representation_()
- {
- }
-
- /** \brief Empty destructor */
- ~FeatureContainer () {}
-
- inline void
- setSourceFeature (const FeatureCloudConstPtr &source_features)
- {
- source_features_ = source_features;
- }
-
- inline FeatureCloudConstPtr
- getSourceFeature ()
- {
- return (source_features_);
- }
-
- inline void
- setTargetFeature (const FeatureCloudConstPtr &target_features)
- {
- target_features_ = target_features;
- }
-
- inline FeatureCloudConstPtr
- getTargetFeature ()
- {
- return (target_features_);
- }
-
- inline void
- setDistanceThreshold (double thresh)
- {
- thresh_ = thresh;
- }
-
- inline bool
- isValid () override
- {
- if (!source_features_ || !target_features_)
- return (false);
- return (source_features_->size () > 0 &&
- target_features_->size () > 0);
- }
-
- /** \brief Provide a boost shared pointer to a PointRepresentation to be used when comparing features
- * \param[in] fr the point feature representation to be used
- */
- inline void
- setFeatureRepresentation (const PointRepresentationConstPtr &fr)
- {
- feature_representation_ = fr;
- }
-
- /** \brief Obtain a score between a pair of correspondences.
- * \param[in] index the index to check in the list of correspondences
- * \return score the resultant computed score
- */
- inline double
- getCorrespondenceScore (int index) override
- {
- // If no feature representation was given, reset to the default implementation for FeatureT
- if (!feature_representation_)
- feature_representation_.reset (new DefaultFeatureRepresentation<FeatureT>);
-
- // Get the source and the target feature from the list
- const FeatureT &feat_src = (*source_features_)[index];
- const FeatureT &feat_tgt = (*target_features_)[index];
-
- // Check if the representations are valid
- if (!feature_representation_->isValid (feat_src) || !feature_representation_->isValid (feat_tgt))
- {
- PCL_ERROR ("[pcl::registration::%s::getCorrespondenceScore] Invalid feature representation given!\n", this->getClassName ().c_str ());
- return (std::numeric_limits<double>::max ());
- }
-
- // Set the internal feature point representation of choice
- Eigen::VectorXf feat_src_ptr = Eigen::VectorXf::Zero (feature_representation_->getNumberOfDimensions ());
- feature_representation_->vectorize (FeatureT (feat_src), feat_src_ptr);
- Eigen::VectorXf feat_tgt_ptr = Eigen::VectorXf::Zero (feature_representation_->getNumberOfDimensions ());
- feature_representation_->vectorize (FeatureT (feat_tgt), feat_tgt_ptr);
-
- // Compute the L2 norm
- return ((feat_src_ptr - feat_tgt_ptr).squaredNorm ());
- }
-
- /** \brief Check whether the correspondence pair at the given index is valid
- * by computing the score and testing it against the user given threshold
- * \param[in] index the index to check in the list of correspondences
- * \return true if the correspondence is good, false otherwise
- */
- inline bool
- isCorrespondenceValid (int index) override
- {
- return (getCorrespondenceScore (index) < thresh_ * thresh_);
- }
-
- private:
- FeatureCloudConstPtr source_features_, target_features_;
- SearchMethod search_method_;
-
- /** \brief The L2 squared Euclidean threshold. */
- double thresh_;
-
- /** \brief The internal point feature representation used. */
- PointRepresentationConstPtr feature_representation_;
- };
- };
+ rejection_name_ = "CorrespondenceRejectorFeatures";
}
-}
+
+ /** \brief Empty destructor. */
+ ~CorrespondenceRejectorFeatures() {}
+
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences
+ * \param[in] original_correspondences the set of initial correspondences given
+ * \param[out] remaining_correspondences the resultant filtered set of remaining
+ * correspondences
+ */
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+
+ /** \brief Provide a pointer to a cloud of feature descriptors associated with the
+ * source point cloud
+ * \param[in] source_feature a cloud of feature descriptors associated with the source
+ * point cloud
+ * \param[in] key a string that uniquely identifies the feature
+ */
+ template <typename FeatureT>
+ inline void
+ setSourceFeature(const typename pcl::PointCloud<FeatureT>::ConstPtr& source_feature,
+ const std::string& key);
+
+ /** \brief Get a pointer to the source cloud's feature descriptors, specified by the
+ * given \a key
+ * \param[in] key a string that uniquely identifies the feature (must match the key
+ * provided by setSourceFeature)
+ */
+ template <typename FeatureT>
+ inline typename pcl::PointCloud<FeatureT>::ConstPtr
+ getSourceFeature(const std::string& key);
+
+ /** \brief Provide a pointer to a cloud of feature descriptors associated with the
+ * target point cloud
+ * \param[in] target_feature a cloud of feature descriptors associated with the target
+ * point cloud
+ * \param[in] key a string that uniquely identifies the feature
+ */
+ template <typename FeatureT>
+ inline void
+ setTargetFeature(const typename pcl::PointCloud<FeatureT>::ConstPtr& target_feature,
+ const std::string& key);
+
+ /** \brief Get a pointer to the target cloud's feature descriptors, specified by the
+ * given \a key
+ * \param[in] key a string that uniquely identifies the feature (must match the key
+ * provided by setTargetFeature)
+ */
+ template <typename FeatureT>
+ inline typename pcl::PointCloud<FeatureT>::ConstPtr
+ getTargetFeature(const std::string& key);
+
+ /** \brief Set a hard distance threshold in the feature \a FeatureT space, between
+ * source and target features. Any feature correspondence that is above this threshold
+ * will be considered bad and will be filtered out.
+ * \param[in] thresh the distance threshold
+ * \param[in] key a string that uniquely identifies the feature
+ */
+ template <typename FeatureT>
+ inline void
+ setDistanceThreshold(double thresh, const std::string& key);
+
+ /** \brief Test that all features are valid (i.e., does each key have a valid source
+ * cloud, target cloud, and search method)
+ */
+ inline bool
+ hasValidFeatures();
+
+ /** \brief Provide a shared pointer to a PointRepresentation to be used when
+ * comparing features
+ * \param[in] key a string that uniquely identifies the feature
+ * \param[in] fr the point feature representation to be used
+ */
+ template <typename FeatureT>
+ inline void
+ setFeatureRepresentation(
+ const typename pcl::PointRepresentation<FeatureT>::ConstPtr& fr,
+ const std::string& key);
+
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
+ }
+
+ /** \brief The maximum distance threshold between two correspondent points in source
+ * <-> target. If the distance is larger than this threshold, the points will be
+ * ignored in the alignment process.
+ */
+ float max_distance_;
+
+ class FeatureContainerInterface {
+ public:
+ /** \brief Empty destructor */
+ virtual ~FeatureContainerInterface() = default;
+ virtual bool
+ isValid() = 0;
+ virtual double
+ getCorrespondenceScore(int index) = 0;
+ virtual bool
+ isCorrespondenceValid(int index) = 0;
+
+ using Ptr = shared_ptr<FeatureContainerInterface>;
+ };
+
+ using FeaturesMap = std::unordered_map<std::string, FeatureContainerInterface::Ptr>;
+
+ /** \brief An STL map containing features to use when performing the correspondence
+ * search.*/
+ FeaturesMap features_map_;
+
+ /** \brief An inner class containing pointers to the source and target feature clouds
+ * and the parameters needed to perform the correspondence search. This class extends
+ * FeatureContainerInterface, which contains abstract methods for any methods that do
+ * not depend on the FeatureT --- these methods can thus be called from a pointer to
+ * FeatureContainerInterface without casting to the derived class.
+ */
+ template <typename FeatureT>
+ class FeatureContainer : public pcl::registration::CorrespondenceRejectorFeatures::
+ FeatureContainerInterface {
+ public:
+ using FeatureCloudConstPtr = typename pcl::PointCloud<FeatureT>::ConstPtr;
+ using SearchMethod = std::function<int(
+ const pcl::PointCloud<FeatureT>&, int, pcl::Indices&, std::vector<float>&)>;
+
+ using PointRepresentationConstPtr =
+ typename pcl::PointRepresentation<FeatureT>::ConstPtr;
+
+ FeatureContainer()
+ : thresh_(std::numeric_limits<double>::max()), feature_representation_()
+ {}
+
+ /** \brief Empty destructor */
+ ~FeatureContainer() {}
+
+ inline void
+ setSourceFeature(const FeatureCloudConstPtr& source_features)
+ {
+ source_features_ = source_features;
+ }
+
+ inline FeatureCloudConstPtr
+ getSourceFeature()
+ {
+ return (source_features_);
+ }
+
+ inline void
+ setTargetFeature(const FeatureCloudConstPtr& target_features)
+ {
+ target_features_ = target_features;
+ }
+
+ inline FeatureCloudConstPtr
+ getTargetFeature()
+ {
+ return (target_features_);
+ }
+
+ inline void
+ setDistanceThreshold(double thresh)
+ {
+ thresh_ = thresh;
+ }
+
+ inline bool
+ isValid() override
+ {
+ if (!source_features_ || !target_features_)
+ return (false);
+ return (source_features_->size() > 0 && target_features_->size() > 0);
+ }
+
+ /** \brief Provide a shared pointer to a PointRepresentation to be used when
+ * comparing features
+ * \param[in] fr the point feature representation to be used
+ */
+ inline void
+ setFeatureRepresentation(const PointRepresentationConstPtr& fr)
+ {
+ feature_representation_ = fr;
+ }
+
+ /** \brief Obtain a score between a pair of correspondences.
+ * \param[in] index the index to check in the list of correspondences
+ * \return score the resultant computed score
+ */
+ inline double
+ getCorrespondenceScore(int index) override
+ {
+ // If no feature representation was given, reset to the default implementation for
+ // FeatureT
+ if (!feature_representation_)
+ feature_representation_.reset(new DefaultFeatureRepresentation<FeatureT>);
+
+ // Get the source and the target feature from the list
+ const FeatureT& feat_src = (*source_features_)[index];
+ const FeatureT& feat_tgt = (*target_features_)[index];
+
+ // Check if the representations are valid
+ if (!feature_representation_->isValid(feat_src) ||
+ !feature_representation_->isValid(feat_tgt)) {
+ PCL_ERROR("[pcl::registration::%s::getCorrespondenceScore] Invalid feature "
+ "representation given!\n",
+ this->getClassName().c_str());
+ return (std::numeric_limits<double>::max());
+ }
+
+ // Set the internal feature point representation of choice
+ Eigen::VectorXf feat_src_ptr =
+ Eigen::VectorXf::Zero(feature_representation_->getNumberOfDimensions());
+ feature_representation_->vectorize(FeatureT(feat_src), feat_src_ptr);
+ Eigen::VectorXf feat_tgt_ptr =
+ Eigen::VectorXf::Zero(feature_representation_->getNumberOfDimensions());
+ feature_representation_->vectorize(FeatureT(feat_tgt), feat_tgt_ptr);
+
+ // Compute the L2 norm
+ return ((feat_src_ptr - feat_tgt_ptr).squaredNorm());
+ }
+
+ /** \brief Check whether the correspondence pair at the given index is valid
+ * by computing the score and testing it against the user given threshold
+ * \param[in] index the index to check in the list of correspondences
+ * \return true if the correspondence is good, false otherwise
+ */
+ inline bool
+ isCorrespondenceValid(int index) override
+ {
+ return (getCorrespondenceScore(index) < thresh_ * thresh_);
+ }
+
+ private:
+ FeatureCloudConstPtr source_features_, target_features_;
+ SearchMethod search_method_;
+
+ /** \brief The L2 squared Euclidean threshold. */
+ double thresh_;
+
+ /** \brief The internal point feature representation used. */
+ PointRepresentationConstPtr feature_representation_;
+ };
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_rejection_features.hpp>
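A minimal usage sketch of CorrespondenceRejectorFeatures as declared above (not part of the change). The FPFHSignature33 feature type, the "fpfh" key and the 0.5 threshold are illustrative assumptions; a correspondence is kept only while the squared L2 distance between its paired descriptors stays below the squared threshold.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/correspondence.h>
#include <pcl/registration/correspondence_rejection_features.h>

void rejectByFeatureDistance(
    const pcl::PointCloud<pcl::FPFHSignature33>::ConstPtr& src_feats,
    const pcl::PointCloud<pcl::FPFHSignature33>::ConstPtr& tgt_feats,
    const pcl::Correspondences& all,
    pcl::Correspondences& good)
{
  pcl::registration::CorrespondenceRejectorFeatures rej;
  // Register both descriptor clouds under a shared key
  rej.setSourceFeature<pcl::FPFHSignature33>(src_feats, "fpfh");
  rej.setTargetFeature<pcl::FPFHSignature33>(tgt_feats, "fpfh");
  // Pairs whose descriptor distance exceeds this threshold are dropped
  rej.setDistanceThreshold<pcl::FPFHSignature33>(0.5, "fpfh");
  if (rej.hasValidFeatures())
    rej.getRemainingCorrespondences(all, good);
}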
#pragma once
#include <pcl/registration/correspondence_rejection.h>
-#include <pcl/memory.h> // for static_pointer_cast
+#include <pcl/conversions.h> // for fromPCLPointCloud2
+#include <pcl/memory.h> // for static_pointer_cast
#include <pcl/point_cloud.h>
+namespace pcl {
+namespace registration {
+/** \brief CorrespondenceRejectorMedianDistance implements a simple correspondence
+ * rejection method that thresholds correspondence distances against a multiple of
+ * their median.
+ *
+ * \note If \ref setInputSource and \ref setInputTarget are given, then the
+ * distances between correspondences will be estimated using the given XYZ
+ * data, and not read from the set of input correspondences.
+ *
+ * \author Aravindhan K Krishnan. This code is ported from libpointmatcher
+ * (https://github.com/ethz-asl/libpointmatcher)
+ * \ingroup registration
+ */
+class PCL_EXPORTS CorrespondenceRejectorMedianDistance : public CorrespondenceRejector {
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
+
+public:
+ using Ptr = shared_ptr<CorrespondenceRejectorMedianDistance>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorMedianDistance>;
+
+ /** \brief Empty constructor. */
+ CorrespondenceRejectorMedianDistance() : median_distance_(0), factor_(1.0)
+ {
+ rejection_name_ = "CorrespondenceRejectorMedianDistance";
+ }
+
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences.
+ * \param[in] original_correspondences the set of initial correspondences given
+ * \param[out] remaining_correspondences the resultant filtered set of remaining
+ * correspondences
+ */
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+
+ /** \brief Get the median distance used for thresholding in correspondence rejection.
+ */
+ inline double
+ getMedianDistance() const
+ {
+ return (median_distance_);
+ };
+
+ /** \brief Provide a source point cloud dataset (must contain XYZ
+ * data!), used to compute the correspondence distance.
+ * \param[in] cloud a cloud containing XYZ data
+ */
+ template <typename PointT>
+ inline void
+ setInputSource(const typename pcl::PointCloud<PointT>::ConstPtr& cloud)
+ {
+ if (!data_container_)
+ data_container_.reset(new DataContainer<PointT>);
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->setInputSource(cloud);
+ }
+
+ /** \brief Provide a target point cloud dataset (must contain XYZ
+ * data!), used to compute the correspondence distance.
+ * \param[in] target a cloud containing XYZ data
+ */
+ template <typename PointT>
+ inline void
+ setInputTarget(const typename pcl::PointCloud<PointT>::ConstPtr& target)
+ {
+ if (!data_container_)
+ data_container_.reset(new DataContainer<PointT>);
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->setInputTarget(target);
+ }
-namespace pcl
-{
- namespace registration
+ /** \brief See if this rejector requires source points */
+ bool
+ requiresSourcePoints() const override
{
- /** \brief CorrespondenceRejectorMedianDistance implements a simple correspondence
- * rejection method based on thresholding based on the median distance between the
- * correspondences.
- *
- * \note If \ref setInputCloud and \ref setInputTarget are given, then the
- * distances between correspondences will be estimated using the given XYZ
- * data, and not read from the set of input correspondences.
- *
- * \author Aravindhan K Krishnan. This code is ported from libpointmatcher (https://github.com/ethz-asl/libpointmatcher)
- * \ingroup registration
- */
- class PCL_EXPORTS CorrespondenceRejectorMedianDistance: public CorrespondenceRejector
- {
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- public:
- using Ptr = shared_ptr<CorrespondenceRejectorMedianDistance>;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorMedianDistance>;
-
- /** \brief Empty constructor. */
- CorrespondenceRejectorMedianDistance ()
- : median_distance_ (0)
- , factor_ (1.0)
- {
- rejection_name_ = "CorrespondenceRejectorMedianDistance";
- }
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
-
- /** \brief Get the median distance used for thresholding in correspondence rejection. */
- inline double
- getMedianDistance () const { return (median_distance_); };
-
- /** \brief Provide a source point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- template <typename PointT> inline void
- setInputSource (const typename pcl::PointCloud<PointT>::ConstPtr &cloud)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputSource (cloud);
- }
-
- /** \brief Provide a source point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "pcl::registration::CorrespondenceRejectorMedianDistance::setInputCloud is deprecated. Please use setInputSource instead")
- inline void
- setInputCloud (const typename pcl::PointCloud<PointT>::ConstPtr &cloud)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputSource (cloud);
- }
-
- /** \brief Provide a target point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] target a cloud containing XYZ data
- */
- template <typename PointT> inline void
- setInputTarget (const typename pcl::PointCloud<PointT>::ConstPtr &target)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputTarget (target);
- }
-
- /** \brief See if this rejector requires source points */
- bool
- requiresSourcePoints () const override
- { return (true); }
-
- /** \brief Blob method for setting the source cloud */
- void
- setSourcePoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputSource<PointXYZ> (cloud);
- }
-
- /** \brief See if this rejector requires a target cloud */
- bool
- requiresTargetPoints () const override
- { return (true); }
-
- /** \brief Method for setting the target cloud */
- void
- setTargetPoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputTarget<PointXYZ> (cloud);
- }
-
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the target cloud.
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputTarget. Only use if you are
- * confident that the tree will be set correctly.
- */
- template <typename PointT> inline void
- setSearchMethodTarget (const typename pcl::search::KdTree<PointT>::Ptr &tree,
- bool force_no_recompute = false)
- {
- static_pointer_cast< DataContainer<PointT> >
- (data_container_)->setSearchMethodTarget (tree, force_no_recompute );
- }
-
- /** \brief Set the factor for correspondence rejection. Points with distance greater than median times factor
- * will be rejected
- * \param[in] factor value
- */
- inline void
- setMedianFactor (double factor) { factor_ = factor; };
-
- /** \brief Get the factor used for thresholding in correspondence rejection. */
- inline double
- getMedianFactor () const { return factor_; };
-
- protected:
-
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
-
- /** \brief The median distance threshold between two correspondent points in source <-> target.
- */
- double median_distance_;
-
- /** \brief The factor for correspondence rejection. Points with distance greater than median times factor
- * will be rejected
- */
- double factor_;
-
- using DataContainerPtr = DataContainerInterface::Ptr;
-
- /** \brief A pointer to the DataContainer object containing the input and target point clouds */
- DataContainerPtr data_container_;
- };
+ return (true);
}
-}
-#include <pcl/registration/impl/correspondence_rejection_median_distance.hpp>
+ /** \brief Blob method for setting the source cloud */
+ void
+ setSourcePoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputSource<PointXYZ>(cloud);
+ }
+
+ /** \brief See if this rejector requires a target cloud */
+ bool
+ requiresTargetPoints() const override
+ {
+ return (true);
+ }
+
+ /** \brief Method for setting the target cloud */
+ void
+ setTargetPoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputTarget<PointXYZ>(cloud);
+ }
+
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the target cloud.
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputTarget. Only use if you are
+ * confident that the tree will be set correctly.
+ */
+ template <typename PointT>
+ inline void
+ setSearchMethodTarget(const typename pcl::search::KdTree<PointT>::Ptr& tree,
+ bool force_no_recompute = false)
+ {
+ static_pointer_cast<DataContainer<PointT>>(data_container_)
+ ->setSearchMethodTarget(tree, force_no_recompute);
+ }
+
+ /** \brief Set the factor for correspondence rejection. Points with distance greater
+ * than median times factor will be rejected.
+ * \param[in] factor the multiplier applied to the median distance
+ */
+ inline void
+ setMedianFactor(double factor)
+ {
+ factor_ = factor;
+ };
+
+ /** \brief Get the factor used for thresholding in correspondence rejection. */
+ inline double
+ getMedianFactor() const
+ {
+ return factor_;
+ };
+
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
+ }
+
+ /** \brief The median distance threshold between two correspondent points in source
+ * <-> target.
+ */
+ double median_distance_;
+
+ /** \brief The factor for correspondence rejection. Points with distance greater than
+ * median times factor will be rejected
+ */
+ double factor_;
+
+ using DataContainerPtr = DataContainerInterface::Ptr;
+
+ /** \brief A pointer to the DataContainer object containing the input and target point
+ * clouds */
+ DataContainerPtr data_container_;
+};
+} // namespace registration
+} // namespace pcl
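A minimal usage sketch of CorrespondenceRejectorMedianDistance as declared above (not part of the change; the 1.5 factor is illustrative). Correspondences whose distance exceeds factor times the median are rejected.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/correspondence.h>
#include <pcl/registration/correspondence_rejection_median_distance.h>

void rejectByMedianDistance(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& src,
                            const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& tgt,
                            const pcl::Correspondences& all,
                            pcl::Correspondences& good)
{
  pcl::registration::CorrespondenceRejectorMedianDistance rej;
  rej.setInputSource<pcl::PointXYZ>(src); // distances recomputed from the XYZ data
  rej.setInputTarget<pcl::PointXYZ>(tgt);
  rej.setMedianFactor(1.5);               // reject anything beyond 1.5x the median
  rej.getRemainingCorrespondences(all, good);
  const double median = rej.getMedianDistance(); // median actually used as the base
  (void)median;
}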
#include <pcl/registration/correspondence_rejection.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief CorrespondenceRejectorOneToOne implements a correspondence
- * rejection method based on eliminating duplicate match indices in
- * the correspondences. Correspondences with the same match index are
- * removed and only the one with smallest distance between query and
- * match are kept. That is, considering match->query 1-m correspondences
- * are removed leaving only 1-1 correspondences.
- * \author Dirk Holz
- * \ingroup registration
- */
- class PCL_EXPORTS CorrespondenceRejectorOneToOne: public CorrespondenceRejector
- {
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- public:
- using Ptr = shared_ptr<CorrespondenceRejectorOneToOne>;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorOneToOne>;
+namespace pcl {
+namespace registration {
+/** \brief CorrespondenceRejectorOneToOne implements a correspondence
+ * rejection method based on eliminating duplicate match indices in
+ * the correspondences. Correspondences with the same match index are
+ * removed and only the one with smallest distance between query and
+ * match are kept. That is, considering match->query 1-m correspondences
+ * are removed leaving only 1-1 correspondences.
+ * \author Dirk Holz
+ * \ingroup registration
+ */
+class PCL_EXPORTS CorrespondenceRejectorOneToOne : public CorrespondenceRejector {
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
- /** \brief Empty constructor. */
- CorrespondenceRejectorOneToOne ()
- {
- rejection_name_ = "CorrespondenceRejectorOneToOne";
- }
+public:
+ using Ptr = shared_ptr<CorrespondenceRejectorOneToOne>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorOneToOne>;
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
+ /** \brief Empty constructor. */
+ CorrespondenceRejectorOneToOne()
+ {
+ rejection_name_ = "CorrespondenceRejectorOneToOne";
+ }
- protected:
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
- };
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences.
+ * \param[in] original_correspondences the set of initial correspondences given
+ * \param[out] remaining_correspondences the resultant filtered set of remaining
+ * correspondences
+ */
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
}
-}
+};
-#include <pcl/registration/impl/correspondence_rejection_one_to_one.hpp>
+} // namespace registration
+} // namespace pcl
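A minimal usage sketch of CorrespondenceRejectorOneToOne as declared above (not part of the change): duplicate match indices are collapsed so that each target point is claimed by at most one source point, keeping the closest pair.

#include <pcl/correspondence.h>
#include <pcl/registration/correspondence_rejection_one_to_one.h>

void makeOneToOne(const pcl::Correspondences& all, pcl::Correspondences& unique)
{
  pcl::registration::CorrespondenceRejectorOneToOne rej;
  rej.getRemainingCorrespondences(all, unique); // keeps only 1-1 matches
}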
#pragma once
#include <pcl/registration/correspondence_rejection.h>
-#include <pcl/memory.h> // for static_pointer_cast
-
-namespace pcl
-{
- namespace registration
+#include <pcl/conversions.h> // for fromPCLPointCloud2
+#include <pcl/memory.h> // for static_pointer_cast
+
+namespace pcl {
+namespace registration {
+/**
+ * @brief The CorrespondenceRejectionOrganizedBoundary class implements a simple
+ * correspondence rejection measure. For each pair of points in correspondence, it
+ * checks whether they are on the boundary of a silhouette. This is done by counting the
+ * number of NaN dexels in a window around the points (the threshold and window size can
+ * be set by the user).
+ * \note Both the source and the target clouds need to be organized, otherwise all the
+ * correspondences will be rejected.
+ *
+ * \author Alexandru E. Ichim
+ * \ingroup registration
+ */
+class PCL_EXPORTS CorrespondenceRejectionOrganizedBoundary
+: public CorrespondenceRejector {
+public:
+ /** @brief Empty constructor. */
+ CorrespondenceRejectionOrganizedBoundary()
+ : boundary_nans_threshold_(8), window_size_(5), depth_step_threshold_(0.025f)
+ {}
+
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+
+ inline void
+ setNumberOfBoundaryNaNs(int val)
{
- /**
- * @brief The CorrespondenceRejectionOrganizedBoundary class implements a simple correspondence rejection measure.
- * For each pair of points in correspondence, it checks whether they are on the boundary of a silhouette. This is
- * done by counting the number of NaN dexels in a window around the points (the threshold and window size can be set
- * by the user).
- * \note Both the source and the target clouds need to be organized, otherwise all the correspondences will be rejected.
- *
- * \author Alexandru E. Ichim
- * \ingroup registration
- */
- class PCL_EXPORTS CorrespondenceRejectionOrganizedBoundary : public CorrespondenceRejector
- {
- public:
- /** @brief Empty constructor. */
- CorrespondenceRejectionOrganizedBoundary ()
- : boundary_nans_threshold_ (8)
- , window_size_ (5)
- , depth_step_threshold_ (0.025f)
- { }
-
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
-
- inline void
- setNumberOfBoundaryNaNs (int val)
- { boundary_nans_threshold_ = val; }
-
-
- template <typename PointT> inline void
- setInputSource (const typename pcl::PointCloud<PointT>::ConstPtr &cloud)
- {
- if (!data_container_)
- data_container_.reset (new pcl::registration::DataContainer<PointT>);
- static_pointer_cast<pcl::registration::DataContainer<PointT> > (data_container_)->setInputSource (cloud);
- }
-
- template <typename PointT> inline void
- setInputTarget (const typename pcl::PointCloud<PointT>::ConstPtr &cloud)
- {
- if (!data_container_)
- data_container_.reset (new pcl::registration::DataContainer<PointT>);
- static_pointer_cast<pcl::registration::DataContainer<PointT> > (data_container_)->setInputTarget (cloud);
- }
+ boundary_nans_threshold_ = val;
+ }
- /** \brief See if this rejector requires source points */
- bool
- requiresSourcePoints () const override
- { return (true); }
+ template <typename PointT>
+ inline void
+ setInputSource(const typename pcl::PointCloud<PointT>::ConstPtr& cloud)
+ {
+ if (!data_container_)
+ data_container_.reset(new pcl::registration::DataContainer<PointT>);
+ static_pointer_cast<pcl::registration::DataContainer<PointT>>(data_container_)
+ ->setInputSource(cloud);
+ }
- /** \brief Blob method for setting the source cloud */
- void
- setSourcePoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputSource<PointXYZ> (cloud);
- }
-
- /** \brief See if this rejector requires a target cloud */
- bool
- requiresTargetPoints () const override
- { return (true); }
+ template <typename PointT>
+ inline void
+ setInputTarget(const typename pcl::PointCloud<PointT>::ConstPtr& cloud)
+ {
+ if (!data_container_)
+ data_container_.reset(new pcl::registration::DataContainer<PointT>);
+ static_pointer_cast<pcl::registration::DataContainer<PointT>>(data_container_)
+ ->setInputTarget(cloud);
+ }
- /** \brief Method for setting the target cloud */
- void
- setTargetPoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputTarget<PointXYZ> (cloud);
- }
+ /** \brief See if this rejector requires source points */
+ bool
+ requiresSourcePoints() const override
+ {
+ return (true);
+ }
- virtual bool
- updateSource (const Eigen::Matrix4d &)
- { return (true); }
+ /** \brief Blob method for setting the source cloud */
+ void
+ setSourcePoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputSource<PointXYZ>(cloud);
+ }
- protected:
+ /** \brief See if this rejector requires a target cloud */
+ bool
+ requiresTargetPoints() const override
+ {
+ return (true);
+ }
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- { getRemainingCorrespondences (*input_correspondences_, correspondences); }
+ /** \brief Method for setting the target cloud */
+ void
+ setTargetPoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputTarget<PointXYZ>(cloud);
+ }
- int boundary_nans_threshold_;
- int window_size_;
- float depth_step_threshold_;
+ virtual bool
+ updateSource(const Eigen::Matrix4d&)
+ {
+ return (true);
+ }
- using DataContainerPtr = DataContainerInterface::Ptr;
- DataContainerPtr data_container_;
- };
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
}
-}
-#include <pcl/registration/impl/correspondence_rejection_organized_boundary.hpp>
+ int boundary_nans_threshold_;
+ int window_size_;
+ float depth_step_threshold_;
+
+ using DataContainerPtr = DataContainerInterface::Ptr;
+ DataContainerPtr data_container_;
+};
+} // namespace registration
+} // namespace pcl
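A minimal usage sketch of CorrespondenceRejectionOrganizedBoundary as declared above (not part of the change; the NaN-count threshold of 8 mirrors the constructor default and is illustrative). Both clouds must be organized, e.g. taken directly from an RGB-D sensor, otherwise every correspondence is rejected.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/correspondence.h>
#include <pcl/registration/correspondence_rejection_organized_boundary.h>

void rejectBoundaryMatches(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& organized_src,
                           const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& organized_tgt,
                           const pcl::Correspondences& all,
                           pcl::Correspondences& good)
{
  pcl::registration::CorrespondenceRejectionOrganizedBoundary rej;
  rej.setInputSource<pcl::PointXYZ>(organized_src);
  rej.setInputTarget<pcl::PointXYZ>(organized_tgt);
  rej.setNumberOfBoundaryNaNs(8); // max NaNs tolerated in the window around a point
  rej.getRemainingCorrespondences(all, good);
}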
#include <pcl/registration/correspondence_rejection.h>
#include <pcl/point_cloud.h>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** \brief CorrespondenceRejectorPoly implements a correspondence rejection method that
+ * exploits low-level and pose-invariant geometric constraints between two point sets by
+ * forming virtual polygons of a user-specifiable cardinality on each model using the
+ * input correspondences. These polygons are then checked in a pose-invariant manner
+ * (i.e. the side lengths must be approximately equal), and rejection is performed by
+ * thresholding these edge lengths.
+ *
+ * If you use this in academic work, please cite:
+ *
+ * A. G. Buch, D. Kraft, J.-K. Kämäräinen, H. G. Petersen and N. Krüger.
+ * Pose Estimation using Local Structure-Specific Shape and Appearance Context.
+ * International Conference on Robotics and Automation (ICRA), 2013.
+ *
+ * \author Anders Glent Buch
+ * \ingroup registration
+ */
+template <typename SourceT, typename TargetT>
+class PCL_EXPORTS CorrespondenceRejectorPoly : public CorrespondenceRejector {
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
+
+public:
+ using Ptr = shared_ptr<CorrespondenceRejectorPoly<SourceT, TargetT>>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorPoly<SourceT, TargetT>>;
+
+ using PointCloudSource = pcl::PointCloud<SourceT>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<TargetT>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ /** \brief Empty constructor */
+ CorrespondenceRejectorPoly()
+ : iterations_(10000)
+ , cardinality_(3)
+ , similarity_threshold_(0.75f)
+ , similarity_threshold_squared_(0.75f * 0.75f)
+ {
+ rejection_name_ = "CorrespondenceRejectorPoly";
+ }
+
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences.
+ * \param[in] original_correspondences the set of initial correspondences given
+ * \param[out] remaining_correspondences the resultant filtered set of remaining
+ * correspondences
+ */
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+
+ /** \brief Provide a source point cloud dataset (must contain XYZ data!), used to
+ * compute the correspondence distance.
+ * \param[in] cloud a cloud containing XYZ data
+ */
+ inline void
+ setInputSource(const PointCloudSourceConstPtr& cloud)
+ {
+ input_ = cloud;
+ }
+
+ /** \brief Provide a target point cloud dataset (must contain XYZ data!), used to
+ * compute the correspondence distance.
+ * \param[in] target a cloud containing XYZ data
+ */
+ inline void
+ setInputTarget(const PointCloudTargetConstPtr& target)
+ {
+ target_ = target;
+ }
+
+ /** \brief See if this rejector requires source points */
+ bool
+ requiresSourcePoints() const override
+ {
+ return (true);
+ }
+
+ /** \brief Blob method for setting the source cloud */
+ void
+ setSourcePoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloudSourcePtr cloud(new PointCloudSource);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputSource(cloud);
+ }
+
+ /** \brief See if this rejector requires a target cloud */
+ bool
+ requiresTargetPoints() const override
{
- /** \brief CorrespondenceRejectorPoly implements a correspondence rejection method that exploits low-level and
- * pose-invariant geometric constraints between two point sets by forming virtual polygons of a user-specifiable
- * cardinality on each model using the input correspondences.
- * These polygons are then checked in a pose-invariant manner (i.e. the side lengths must be approximately equal),
- * and rejection is performed by thresholding these edge lengths.
- *
- * If you use this in academic work, please cite:
- *
- * A. G. Buch, D. Kraft, J.-K. Kämäräinen, H. G. Petersen and N. Krüger.
- * Pose Estimation using Local Structure-Specific Shape and Appearance Context.
- * International Conference on Robotics and Automation (ICRA), 2013.
- *
- * \author Anders Glent Buch
- * \ingroup registration
- */
- template <typename SourceT, typename TargetT>
- class PCL_EXPORTS CorrespondenceRejectorPoly: public CorrespondenceRejector
+ return (true);
+ }
+
+ /** \brief Method for setting the target cloud */
+ void
+ setTargetPoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloudTargetPtr cloud(new PointCloudTarget);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputTarget(cloud);
+ }
+
+ /** \brief Set the polygon cardinality
+ * \param cardinality polygon cardinality
+ */
+ inline void
+ setCardinality(int cardinality)
+ {
+ cardinality_ = cardinality;
+ }
+
+ /** \brief Get the polygon cardinality
+ * \return polygon cardinality
+ */
+ inline int
+ getCardinality()
+ {
+ return (cardinality_);
+ }
+
+ /** \brief Set the similarity threshold in [0,1[ between edge lengths,
+ * where 1 is a perfect match
+ * \param similarity_threshold similarity threshold
+ */
+ inline void
+ setSimilarityThreshold(float similarity_threshold)
+ {
+ similarity_threshold_ = similarity_threshold;
+ similarity_threshold_squared_ = similarity_threshold * similarity_threshold;
+ }
+
+ /** \brief Get the similarity threshold between edge lengths
+ * \return similarity threshold
+ */
+ inline float
+ getSimilarityThreshold()
+ {
+ return (similarity_threshold_);
+ }
+
+ /** \brief Set the number of iterations
+ * \param iterations number of iterations
+ */
+ inline void
+ setIterations(int iterations)
+ {
+ iterations_ = iterations;
+ }
+
+ /** \brief Get the number of iterations
+ * \return number of iterations
+ */
+ inline int
+ getIterations()
+ {
+ return (iterations_);
+ }
+
+ /** \brief Polygonal rejection of a single polygon, indexed by a subset of
+ * correspondences
+ * \param corr all correspondences into \ref input_ and \ref target_
+ * \param idx sampled indices into \b correspondences, must have a size equal to
+ * \ref cardinality_
+ * \return true if all edge length ratios are larger than or equal to
+ * \ref similarity_threshold_
+ */
+ inline bool
+ thresholdPolygon(const pcl::Correspondences& corr, const std::vector<int>& idx)
+ {
+ // Special case: when two points are considered, we only have one edge
+ if (cardinality_ == 2) {
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- public:
- using Ptr = shared_ptr<CorrespondenceRejectorPoly<SourceT, TargetT> >;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorPoly<SourceT, TargetT> >;
-
- using PointCloudSource = pcl::PointCloud<SourceT>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<TargetT>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- /** \brief Empty constructor */
- CorrespondenceRejectorPoly ()
- : iterations_ (10000)
- , cardinality_ (3)
- , similarity_threshold_ (0.75f)
- , similarity_threshold_squared_ (0.75f * 0.75f)
- {
- rejection_name_ = "CorrespondenceRejectorPoly";
- }
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
-
- /** \brief Provide a source point cloud dataset (must contain XYZ data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- inline void
- setInputSource (const PointCloudSourceConstPtr &cloud)
- {
- input_ = cloud;
- }
-
- /** \brief Provide a source point cloud dataset (must contain XYZ data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- PCL_DEPRECATED(1, 12, "pcl::registration::CorrespondenceRejectorPoly::setInputCloud is deprecated. Please use setInputSource instead")
- inline void
- setInputCloud (const PointCloudSourceConstPtr &cloud)
- {
- input_ = cloud;
- }
-
- /** \brief Provide a target point cloud dataset (must contain XYZ data!), used to compute the correspondence distance.
- * \param[in] target a cloud containing XYZ data
- */
- inline void
- setInputTarget (const PointCloudTargetConstPtr &target)
- {
- target_ = target;
- }
-
- /** \brief See if this rejector requires source points */
- bool
- requiresSourcePoints () const override
- { return (true); }
-
- /** \brief Blob method for setting the source cloud */
- void
- setSourcePoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloudSourcePtr cloud (new PointCloudSource);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputSource (cloud);
- }
-
- /** \brief See if this rejector requires a target cloud */
- bool
- requiresTargetPoints () const override
- { return (true); }
-
- /** \brief Method for setting the target cloud */
- void
- setTargetPoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloudTargetPtr cloud (new PointCloudTarget);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputTarget (cloud);
- }
-
- /** \brief Set the polygon cardinality
- * \param cardinality polygon cardinality
- */
- inline void
- setCardinality (int cardinality)
- {
- cardinality_ = cardinality;
- }
-
- /** \brief Get the polygon cardinality
- * \return polygon cardinality
- */
- inline int
- getCardinality ()
- {
- return (cardinality_);
- }
-
- /** \brief Set the similarity threshold in [0,1[ between edge lengths,
- * where 1 is a perfect match
- * \param similarity_threshold similarity threshold
- */
- inline void
- setSimilarityThreshold (float similarity_threshold)
- {
- similarity_threshold_ = similarity_threshold;
- similarity_threshold_squared_ = similarity_threshold * similarity_threshold;
- }
-
- /** \brief Get the similarity threshold between edge lengths
- * \return similarity threshold
- */
- inline float
- getSimilarityThreshold ()
- {
- return (similarity_threshold_);
- }
-
- /** \brief Set the number of iterations
- * \param iterations number of iterations
- */
- inline void
- setIterations (int iterations)
- {
- iterations_ = iterations;
- }
-
- /** \brief Get the number of iterations
- * \return number of iterations
- */
- inline int
- getIterations ()
- {
- return (iterations_);
- }
-
- /** \brief Polygonal rejection of a single polygon, indexed by a subset of correspondences
- * \param corr all correspondences into \ref input_ and \ref target_
- * \param idx sampled indices into \b correspondences, must have a size equal to \ref cardinality_
- * \return true if all edge length ratios are larger than or equal to \ref similarity_threshold_
- */
- inline bool
- thresholdPolygon (const pcl::Correspondences& corr, const std::vector<int>& idx)
- {
- if (cardinality_ == 2) // Special case: when two points are considered, we only have one edge
- {
- return (thresholdEdgeLength (corr[ idx[0] ].index_query, corr[ idx[1] ].index_query,
- corr[ idx[0] ].index_match, corr[ idx[1] ].index_match,
- cardinality_));
- }
- // Otherwise check all edges
- for (int i = 0; i < cardinality_; ++i)
- {
- if (!thresholdEdgeLength (corr[ idx[i] ].index_query, corr[ idx[(i+1)%cardinality_] ].index_query,
- corr[ idx[i] ].index_match, corr[ idx[(i+1)%cardinality_] ].index_match,
- similarity_threshold_squared_))
- {
- return (false);
- }
- }
- return (true);
- }
-
- /** \brief Polygonal rejection of a single polygon, indexed by two point index vectors
- * \param source_indices indices of polygon points in \ref input_, must have a size equal to \ref cardinality_
- * \param target_indices corresponding indices of polygon points in \ref target_, must have a size equal to \ref cardinality_
- * \return true if all edge length ratios are larger than or equal to \ref similarity_threshold_
- */
- inline bool
- thresholdPolygon (const std::vector<int>& source_indices, const std::vector<int>& target_indices)
- {
- // Convert indices to correspondences and an index vector pointing to each element
- pcl::Correspondences corr (cardinality_);
- std::vector<int> idx (cardinality_);
- for (int i = 0; i < cardinality_; ++i)
- {
- corr[i].index_query = source_indices[i];
- corr[i].index_match = target_indices[i];
- idx[i] = i;
- }
-
- return (thresholdPolygon (corr, idx));
- }
-
- protected:
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
-
- /** \brief Get k unique random indices in range {0,...,n-1} (sampling without replacement)
- * \note No check is made to ensure that k <= n.
- * \param n upper index range, exclusive
- * \param k number of unique indices to sample
- * \return k unique random indices in range {0,...,n-1}
- */
- inline std::vector<int>
- getUniqueRandomIndices (int n, int k)
- {
- // Marked sampled indices and sample counter
- std::vector<bool> sampled (n, false);
- int samples = 0;
- // Resulting unique indices
- std::vector<int> result;
- result.reserve (k);
- do
- {
- // Pick a random index in the range
- const int idx = (std::rand () % n);
- // If unique
- if (!sampled[idx])
- {
- // Mark as sampled and increment result counter
- sampled[idx] = true;
- ++samples;
- // Store
- result.push_back (idx);
- }
- }
- while (samples < k);
-
- return (result);
- }
-
- /** \brief Squared Euclidean distance between two points using the members x, y and z
- * \param p1 first point
- * \param p2 second point
- * \return squared Euclidean distance
- */
- inline float
- computeSquaredDistance (const SourceT& p1, const TargetT& p2)
- {
- const float dx = p2.x - p1.x;
- const float dy = p2.y - p1.y;
- const float dz = p2.z - p1.z;
-
- return (dx*dx + dy*dy + dz*dz);
- }
-
- /** \brief Edge length similarity thresholding
- * \param index_query_1 index of first source vertex
- * \param index_query_2 index of second source vertex
- * \param index_match_1 index of first target vertex
- * \param index_match_2 index of second target vertex
- * \param simsq squared similarity threshold in [0,1]
- * \return true if edge length ratio is larger than or equal to threshold
- */
- inline bool
- thresholdEdgeLength (int index_query_1,
- int index_query_2,
- int index_match_1,
- int index_match_2,
- float simsq)
- {
- // Distance between source points
- const float dist_src = computeSquaredDistance ((*input_)[index_query_1], (*input_)[index_query_2]);
- // Distance between target points
- const float dist_tgt = computeSquaredDistance ((*target_)[index_match_1], (*target_)[index_match_2]);
- // Edge length similarity [0,1] where 1 is a perfect match
- const float edge_sim = (dist_src < dist_tgt ? dist_src / dist_tgt : dist_tgt / dist_src);
-
- return (edge_sim >= simsq);
- }
-
- /** \brief Compute a linear histogram. This function is equivalent to the MATLAB function \b histc, with the
- * edges set as follows: <b> lower:(upper-lower)/bins:upper </b>
- * \param data input samples
- * \param lower lower bound of input samples
- * \param upper upper bound of input samples
- * \param bins number of bins in output
- * \return linear histogram
- */
- std::vector<int>
- computeHistogram (const std::vector<float>& data, float lower, float upper, int bins);
-
- /** \brief Find the optimal value for binary histogram thresholding using Otsu's method
- * \param histogram input histogram
- * \return threshold value according to Otsu's criterion
- */
- int
- findThresholdOtsu (const std::vector<int>& histogram);
-
- /** \brief The input point cloud dataset */
- PointCloudSourceConstPtr input_;
-
- /** \brief The input point cloud dataset target */
- PointCloudTargetConstPtr target_;
-
- /** \brief Number of iterations to run */
- int iterations_;
-
- /** \brief The polygon cardinality used during rejection */
- int cardinality_;
-
- /** \brief Lower edge length threshold in [0,1] used for verifying polygon similarities, where 1 is a perfect match */
- float similarity_threshold_;
-
- /** \brief Squared value if \ref similarity_threshold_, only for internal use */
- float similarity_threshold_squared_;
- };
+ return (thresholdEdgeLength(corr[idx[0]].index_query,
+ corr[idx[1]].index_query,
+ corr[idx[0]].index_match,
+ corr[idx[1]].index_match,
+                                  similarity_threshold_squared_));
+ }
+ // Otherwise check all edges
+ for (int i = 0; i < cardinality_; ++i) {
+ if (!thresholdEdgeLength(corr[idx[i]].index_query,
+ corr[idx[(i + 1) % cardinality_]].index_query,
+ corr[idx[i]].index_match,
+ corr[idx[(i + 1) % cardinality_]].index_match,
+ similarity_threshold_squared_)) {
+ return (false);
+ }
+ }
+ return (true);
}
-}
+
+ /** \brief Polygonal rejection of a single polygon, indexed by two point index vectors
+ * \param source_indices indices of polygon points in \ref input_, must have a size
+ * equal to \ref cardinality_ \param target_indices corresponding indices of polygon
+ * points in \ref target_, must have a size equal to \ref cardinality_ \return true if
+ * all edge length ratios are larger than or equal to \ref similarity_threshold_
+ */
+ inline bool
+ thresholdPolygon(const pcl::Indices& source_indices,
+ const pcl::Indices& target_indices)
+ {
+ // Convert indices to correspondences and an index vector pointing to each element
+ pcl::Correspondences corr(cardinality_);
+ std::vector<int> idx(cardinality_);
+ for (int i = 0; i < cardinality_; ++i) {
+ corr[i].index_query = source_indices[i];
+ corr[i].index_match = target_indices[i];
+ idx[i] = i;
+ }
+
+ return (thresholdPolygon(corr, idx));
+ }
+
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
+ }
+
+ /** \brief Get k unique random indices in range {0,...,n-1} (sampling without
+ * replacement) \note No check is made to ensure that k <= n. \param n upper index
+ * range, exclusive \param k number of unique indices to sample \return k unique
+ * random indices in range {0,...,n-1}
+ */
+ inline std::vector<int>
+ getUniqueRandomIndices(int n, int k)
+ {
+ // Marked sampled indices and sample counter
+ std::vector<bool> sampled(n, false);
+ int samples = 0;
+ // Resulting unique indices
+ std::vector<int> result;
+ result.reserve(k);
+ do {
+ // Pick a random index in the range
+ const int idx = (std::rand() % n);
+ // If unique
+ if (!sampled[idx]) {
+ // Mark as sampled and increment result counter
+ sampled[idx] = true;
+ ++samples;
+ // Store
+ result.push_back(idx);
+ }
+ } while (samples < k);
+
+ return (result);
+ }
+
+ /** \brief Squared Euclidean distance between two points using the members x, y and z
+ * \param p1 first point
+ * \param p2 second point
+ * \return squared Euclidean distance
+ */
+ inline float
+ computeSquaredDistance(const SourceT& p1, const TargetT& p2)
+ {
+ const float dx = p2.x - p1.x;
+ const float dy = p2.y - p1.y;
+ const float dz = p2.z - p1.z;
+
+ return (dx * dx + dy * dy + dz * dz);
+ }
+
+ /** \brief Edge length similarity thresholding
+ * \param index_query_1 index of first source vertex
+ * \param index_query_2 index of second source vertex
+ * \param index_match_1 index of first target vertex
+ * \param index_match_2 index of second target vertex
+ * \param simsq squared similarity threshold in [0,1]
+ * \return true if edge length ratio is larger than or equal to threshold
+ */
+ inline bool
+ thresholdEdgeLength(int index_query_1,
+ int index_query_2,
+ int index_match_1,
+ int index_match_2,
+ float simsq)
+ {
+ // Distance between source points
+ const float dist_src =
+ computeSquaredDistance((*input_)[index_query_1], (*input_)[index_query_2]);
+ // Distance between target points
+ const float dist_tgt =
+ computeSquaredDistance((*target_)[index_match_1], (*target_)[index_match_2]);
+ // Edge length similarity [0,1] where 1 is a perfect match
+ const float edge_sim =
+ (dist_src < dist_tgt ? dist_src / dist_tgt : dist_tgt / dist_src);
+
+ return (edge_sim >= simsq);
+ }
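+  // Worked example (illustrative only): with similarity_threshold_ = 0.9 the squared
+  // threshold is 0.81. A source edge of length 1.0 (dist_src = 1.0) against a target
+  // edge of length 1.1 (dist_tgt = 1.21) gives edge_sim = 1.0 / 1.21 ~ 0.83 >= 0.81,
+  // so the edge pair passes; stretching the target edge to 1.2 (dist_tgt = 1.44)
+  // gives edge_sim ~ 0.69 < 0.81 and the polygon is rejected.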
+
+ /** \brief Compute a linear histogram. This function is equivalent to the MATLAB
+ * function \b histc, with the edges set as follows: <b>
+ * lower:(upper-lower)/bins:upper </b> \param data input samples \param lower lower
+ * bound of input samples \param upper upper bound of input samples \param bins number
+ * of bins in output \return linear histogram
+ */
+ std::vector<int>
+ computeHistogram(const std::vector<float>& data, float lower, float upper, int bins);
+
+ /** \brief Find the optimal value for binary histogram thresholding using Otsu's
+ * method \param histogram input histogram \return threshold value according to Otsu's
+ * criterion
+ */
+ int
+ findThresholdOtsu(const std::vector<int>& histogram);
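+  // Illustrative note: computeHistogram (data, 0.f, 1.f, 4) buckets the samples with
+  // edges 0:0.25:1, i.e. bins [0,0.25), [0.25,0.5), [0.5,0.75) and [0.75,1], matching
+  // MATLAB's histc. findThresholdOtsu then returns the bin index that best separates
+  // the resulting histogram into two classes under Otsu's criterion.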
+
+ /** \brief The input point cloud dataset */
+ PointCloudSourceConstPtr input_;
+
+ /** \brief The input point cloud dataset target */
+ PointCloudTargetConstPtr target_;
+
+ /** \brief Number of iterations to run */
+ int iterations_;
+
+ /** \brief The polygon cardinality used during rejection */
+ int cardinality_;
+
+ /** \brief Lower edge length threshold in [0,1] used for verifying polygon
+ * similarities, where 1 is a perfect match */
+ float similarity_threshold_;
+
+  /** \brief Squared value of \ref similarity_threshold_, only for internal use */
+ float similarity_threshold_squared_;
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_rejection_poly.hpp>
#pragma once
-
-#include <pcl/memory.h>
#include <pcl/registration/correspondence_rejection.h>
+#include <pcl/memory.h>
+
+namespace pcl {
+namespace registration {
+/** \brief CorrespondenceRejectorSampleConsensus implements a correspondence rejection
+ * using Random Sample Consensus to identify inliers (and reject outliers)
+ * \author Dirk Holz
+ * \ingroup registration
+ */
+template <typename PointT>
+class CorrespondenceRejectorSampleConsensus : public CorrespondenceRejector {
+ using PointCloud = pcl::PointCloud<PointT>;
+ using PointCloudPtr = typename PointCloud::Ptr;
+ using PointCloudConstPtr = typename PointCloud::ConstPtr;
+
+public:
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
+
+ using Ptr = shared_ptr<CorrespondenceRejectorSampleConsensus<PointT>>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorSampleConsensus<PointT>>;
+
+ /** \brief Empty constructor. Sets the inlier threshold to 5cm (0.05m),
+ * and the maximum number of iterations to 1000.
+ */
+ CorrespondenceRejectorSampleConsensus()
+ : inlier_threshold_(0.05)
+ , max_iterations_(1000) // std::numeric_limits<int>::max ()
+ , input_()
+ , input_transformed_()
+ , target_()
+ , refine_(false)
+ , save_inliers_(false)
+ {
+ rejection_name_ = "CorrespondenceRejectorSampleConsensus";
+ }
+
+ /** \brief Empty destructor. */
+ ~CorrespondenceRejectorSampleConsensus() {}
+
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences. \param[in] original_correspondences the set of initial
+ * correspondences given \param[out] remaining_correspondences the resultant filtered
+ * set of remaining correspondences
+ */
+ inline void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+
+ /** \brief Provide a source point cloud dataset (must contain XYZ data!)
+ * \param[in] cloud a cloud containing XYZ data
+ */
+ virtual inline void
+ setInputSource(const PointCloudConstPtr& cloud)
+ {
+ input_ = cloud;
+ }
+
+  /** \brief Get a pointer to the input source point cloud dataset. */
+ inline PointCloudConstPtr const
+ getInputSource()
+ {
+ return (input_);
+ }
+
+ /** \brief Provide a target point cloud dataset (must contain XYZ data!)
+ * \param[in] cloud a cloud containing XYZ data
+ */
+ virtual inline void
+ setInputTarget(const PointCloudConstPtr& cloud)
+ {
+ target_ = cloud;
+ }
+
+ /** \brief Get a pointer to the input point cloud dataset target. */
+ inline PointCloudConstPtr const
+ getInputTarget()
+ {
+ return (target_);
+ }
+
+ /** \brief See if this rejector requires source points */
+ bool
+ requiresSourcePoints() const override
+ {
+ return (true);
+ }
+
+ /** \brief Blob method for setting the source cloud */
+ void
+ setSourcePoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloudPtr cloud(new PointCloud);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputSource(cloud);
+ }
+
+ /** \brief See if this rejector requires a target cloud */
+ bool
+ requiresTargetPoints() const override
+ {
+ return (true);
+ }
+
+ /** \brief Method for setting the target cloud */
+ void
+ setTargetPoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloudPtr cloud(new PointCloud);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputTarget(cloud);
+ }
+
+ /** \brief Set the maximum distance between corresponding points.
+ * Correspondences with distances below the threshold are considered as inliers.
+ * \param[in] threshold Distance threshold in the same dimension as source and target
+ * data sets.
+ */
+ inline void
+ setInlierThreshold(double threshold)
+ {
+ inlier_threshold_ = threshold;
+ };
+
+ /** \brief Get the maximum distance between corresponding points.
+ * \return Distance threshold in the same dimension as source and target data sets.
+ */
+ inline double
+ getInlierThreshold()
+ {
+ return inlier_threshold_;
+ };
+
+ /** \brief Set the maximum number of iterations.
+   * \param[in] max_iterations Maximum number of iterations to run
+ */
+ inline void
+ setMaximumIterations(int max_iterations)
+ {
+ max_iterations_ = std::max(max_iterations, 0);
+ }
+
+ /** \brief Get the maximum number of iterations.
+   * \return the maximum number of iterations to run
+ */
+ inline int
+ getMaximumIterations()
+ {
+ return (max_iterations_);
+ }
+
+ /** \brief Get the best transformation after RANSAC rejection.
+ * \return The homogeneous 4x4 transformation yielding the largest number of inliers.
+ */
+ inline Eigen::Matrix4f
+ getBestTransformation()
+ {
+ return best_transformation_;
+ };
+
+ /** \brief Specify whether the model should be refined internally using the variance
+ * of the inliers \param[in] refine true if the model should be refined, false
+ * otherwise
+ */
+ inline void
+ setRefineModel(const bool refine)
+ {
+ refine_ = refine;
+ }
+
+ /** \brief Get the internal refine parameter value as set by the user using
+ * setRefineModel */
+ inline bool
+ getRefineModel() const
+ {
+ return (refine_);
+ }
+
+ /** \brief Get the inlier indices found by the correspondence rejector. This
+ * information is only saved if setSaveInliers(true) was called in advance.
+ * \param[out] inlier_indices Indices for the inliers
+ */
+ inline void
+ getInliersIndices(pcl::Indices& inlier_indices)
+ {
+ inlier_indices = inlier_indices_;
+ }
+
+ /** \brief Set whether to save inliers or not
+ * \param[in] s True to save inliers / False otherwise
+ */
+ inline void
+ setSaveInliers(bool s)
+ {
+ save_inliers_ = s;
+ }
+
+ /** \brief Get whether the rejector is configured to save inliers */
+ inline bool
+ getSaveInliers()
+ {
+ return save_inliers_;
+ }
+
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
+ }
+
+ double inlier_threshold_;
+
+ int max_iterations_;
+
+ PointCloudConstPtr input_;
+ PointCloudPtr input_transformed_;
+ PointCloudConstPtr target_;
+
+ Eigen::Matrix4f best_transformation_;
+
+ bool refine_;
+ pcl::Indices inlier_indices_;
+ bool save_inliers_;
-#include <pcl/common/transforms.h>
-
-namespace pcl
-{
- namespace registration
- {
- /** \brief CorrespondenceRejectorSampleConsensus implements a correspondence rejection
- * using Random Sample Consensus to identify inliers (and reject outliers)
- * \author Dirk Holz
- * \ingroup registration
- */
- template <typename PointT>
- class CorrespondenceRejectorSampleConsensus: public CorrespondenceRejector
- {
- using PointCloud = pcl::PointCloud<PointT>;
- using PointCloudPtr = typename PointCloud::Ptr;
- using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
- public:
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- using Ptr = shared_ptr<CorrespondenceRejectorSampleConsensus<PointT> >;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorSampleConsensus<PointT> >;
-
- /** \brief Empty constructor. Sets the inlier threshold to 5cm (0.05m),
- * and the maximum number of iterations to 1000.
- */
- CorrespondenceRejectorSampleConsensus ()
- : inlier_threshold_ (0.05)
- , max_iterations_ (1000) // std::numeric_limits<int>::max ()
- , input_ ()
- , input_transformed_ ()
- , target_ ()
- , refine_ (false)
- , save_inliers_ (false)
- {
- rejection_name_ = "CorrespondenceRejectorSampleConsensus";
- }
-
- /** \brief Empty destructor. */
- ~CorrespondenceRejectorSampleConsensus () {}
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- inline void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
-
- /** \brief Provide a source point cloud dataset (must contain XYZ data!)
- * \param[in] cloud a cloud containing XYZ data
- */
- virtual inline void
- setInputSource (const PointCloudConstPtr &cloud)
- {
- input_ = cloud;
- }
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- inline PointCloudConstPtr const
- getInputSource () { return (input_); }
-
- /** \brief Provide a target point cloud dataset (must contain XYZ data!)
- * \param[in] cloud a cloud containing XYZ data
- */
- virtual inline void
- setInputTarget (const PointCloudConstPtr &cloud) { target_ = cloud; }
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- inline PointCloudConstPtr const
- getInputTarget () { return (target_ ); }
-
-
- /** \brief See if this rejector requires source points */
- bool
- requiresSourcePoints () const override
- { return (true); }
-
- /** \brief Blob method for setting the source cloud */
- void
- setSourcePoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloudPtr cloud (new PointCloud);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputSource (cloud);
- }
-
- /** \brief See if this rejector requires a target cloud */
- bool
- requiresTargetPoints () const override
- { return (true); }
-
- /** \brief Method for setting the target cloud */
- void
- setTargetPoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloudPtr cloud (new PointCloud);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputTarget (cloud);
- }
-
- /** \brief Set the maximum distance between corresponding points.
- * Correspondences with distances below the threshold are considered as inliers.
- * \param[in] threshold Distance threshold in the same dimension as source and target data sets.
- */
- inline void
- setInlierThreshold (double threshold) { inlier_threshold_ = threshold; };
-
- /** \brief Get the maximum distance between corresponding points.
- * \return Distance threshold in the same dimension as source and target data sets.
- */
- inline double
- getInlierThreshold () { return inlier_threshold_; };
-
- /** \brief Set the maximum number of iterations.
- * \param[in] max_iterations Maximum number if iterations to run
- */
- inline void
- setMaximumIterations (int max_iterations) { max_iterations_ = std::max (max_iterations, 0); }
-
- /** \brief Get the maximum number of iterations.
- * \return max_iterations Maximum number if iterations to run
- */
- inline int
- getMaximumIterations () { return (max_iterations_); }
-
- /** \brief Get the best transformation after RANSAC rejection.
- * \return The homogeneous 4x4 transformation yielding the largest number of inliers.
- */
- inline Eigen::Matrix4f
- getBestTransformation () { return best_transformation_; };
-
- /** \brief Specify whether the model should be refined internally using the variance of the inliers
- * \param[in] refine true if the model should be refined, false otherwise
- */
- inline void
- setRefineModel (const bool refine)
- {
- refine_ = refine;
- }
-
- /** \brief Get the internal refine parameter value as set by the user using setRefineModel */
- inline bool
- getRefineModel () const
- {
- return (refine_);
- }
-
- /** \brief Get the inlier indices found by the correspondence rejector. This information is only saved if setSaveInliers(true) was called in advance.
- * \param[out] inlier_indices Indices for the inliers
- */
- inline void
- getInliersIndices (std::vector<int> &inlier_indices) { inlier_indices = inlier_indices_; }
-
- /** \brief Set whether to save inliers or not
- * \param[in] s True to save inliers / False otherwise
- */
- inline void
- setSaveInliers (bool s) { save_inliers_ = s; }
-
- /** \brief Get whether the rejector is configured to save inliers */
- inline bool
- getSaveInliers () { return save_inliers_; }
-
-
- protected:
-
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
-
- double inlier_threshold_;
-
- int max_iterations_;
-
- PointCloudConstPtr input_;
- PointCloudPtr input_transformed_;
- PointCloudConstPtr target_;
-
- Eigen::Matrix4f best_transformation_;
-
- bool refine_;
- std::vector<int> inlier_indices_;
- bool save_inliers_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
- }
-}
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_rejection_sample_consensus.hpp>
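For reference, a minimal usage sketch of the RANSAC-based rejector declared above; `source`, `target` and `initial` are hypothetical inputs, and only members declared in this header are used:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/correspondence_rejection_sample_consensus.h>

pcl::Correspondences
rejectWithRansac(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                 const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target,
                 const pcl::Correspondences& initial)
{
  pcl::registration::CorrespondenceRejectorSampleConsensus<pcl::PointXYZ> rejector;
  rejector.setInputSource(source);
  rejector.setInputTarget(target);
  rejector.setInlierThreshold(0.05);   // same unit as the clouds, here 5 cm
  rejector.setMaximumIterations(1000);

  pcl::Correspondences remaining;
  rejector.getRemainingCorrespondences(initial, remaining);
  // The transformation with the largest consensus set is also available:
  // Eigen::Matrix4f best = rejector.getBestTransformation();
  return remaining;
}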
#pragma once
-#include <pcl/memory.h>
#include <pcl/registration/correspondence_rejection_sample_consensus.h>
+#include <pcl/memory.h>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** \brief CorrespondenceRejectorSampleConsensus2D implements a pixel-based
+ * correspondence rejection using Random Sample Consensus to identify inliers
+ * (and reject outliers)
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointT>
+class CorrespondenceRejectorSampleConsensus2D
+: public CorrespondenceRejectorSampleConsensus<PointT> {
+ using PointCloud = pcl::PointCloud<PointT>;
+ using PointCloudPtr = typename PointCloud::Ptr;
+ using PointCloudConstPtr = typename PointCloud::ConstPtr;
+
+public:
+ using CorrespondenceRejectorSampleConsensus<PointT>::refine_;
+ using CorrespondenceRejectorSampleConsensus<PointT>::input_;
+ using CorrespondenceRejectorSampleConsensus<PointT>::target_;
+ using CorrespondenceRejectorSampleConsensus<PointT>::input_correspondences_;
+ using CorrespondenceRejectorSampleConsensus<PointT>::rejection_name_;
+ using CorrespondenceRejectorSampleConsensus<PointT>::getClassName;
+ using CorrespondenceRejectorSampleConsensus<PointT>::inlier_threshold_;
+ using CorrespondenceRejectorSampleConsensus<PointT>::max_iterations_;
+ using CorrespondenceRejectorSampleConsensus<PointT>::best_transformation_;
+
+ using Ptr = shared_ptr<CorrespondenceRejectorSampleConsensus2D<PointT>>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorSampleConsensus2D<PointT>>;
+
+ /** \brief Empty constructor. Sets the inlier threshold to 5cm (0.05m),
+ * and the maximum number of iterations to 1000.
+ */
+ CorrespondenceRejectorSampleConsensus2D()
+ : projection_matrix_(Eigen::Matrix3f::Identity())
{
- /** \brief CorrespondenceRejectorSampleConsensus2D implements a pixel-based
- * correspondence rejection using Random Sample Consensus to identify inliers
- * (and reject outliers)
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointT>
- class CorrespondenceRejectorSampleConsensus2D: public CorrespondenceRejectorSampleConsensus<PointT>
- {
- using PointCloud = pcl::PointCloud<PointT>;
- using PointCloudPtr = typename PointCloud::Ptr;
- using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
- public:
- using CorrespondenceRejectorSampleConsensus<PointT>::refine_;
- using CorrespondenceRejectorSampleConsensus<PointT>::input_;
- using CorrespondenceRejectorSampleConsensus<PointT>::target_;
- using CorrespondenceRejectorSampleConsensus<PointT>::input_correspondences_;
- using CorrespondenceRejectorSampleConsensus<PointT>::rejection_name_;
- using CorrespondenceRejectorSampleConsensus<PointT>::getClassName;
- using CorrespondenceRejectorSampleConsensus<PointT>::inlier_threshold_;
- using CorrespondenceRejectorSampleConsensus<PointT>::max_iterations_;
- using CorrespondenceRejectorSampleConsensus<PointT>::best_transformation_;
-
- using Ptr = shared_ptr<CorrespondenceRejectorSampleConsensus2D<PointT> >;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorSampleConsensus2D<PointT> >;
-
- /** \brief Empty constructor. Sets the inlier threshold to 5cm (0.05m),
- * and the maximum number of iterations to 1000.
- */
- CorrespondenceRejectorSampleConsensus2D ()
- : projection_matrix_ (Eigen::Matrix3f::Identity ())
- {
- rejection_name_ = "CorrespondenceRejectorSampleConsensus2D";
- // Put the projection matrix together
- //projection_matrix_ (0, 0) = 525.f;
- //projection_matrix_ (1, 1) = 525.f;
- //projection_matrix_ (0, 2) = 320.f;
- //projection_matrix_ (1, 2) = 240.f;
- }
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- inline void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences);
-
- /** \brief Sets the focal length parameters of the target camera.
- * \param[in] fx the focal length in pixels along the x-axis of the image
- * \param[in] fy the focal length in pixels along the y-axis of the image
- */
- inline void
- setFocalLengths (const float fx, const float fy)
- {
- projection_matrix_ (0, 0) = fx;
- projection_matrix_ (1, 1) = fy;
- }
-
- /** \brief Reads back the focal length parameters of the target camera.
- * \param[out] fx the focal length in pixels along the x-axis of the image
- * \param[out] fy the focal length in pixels along the y-axis of the image
- */
- inline void
- getFocalLengths (float &fx, float &fy) const
- {
- fx = projection_matrix_ (0, 0);
- fy = projection_matrix_ (1, 1);
- }
+ rejection_name_ = "CorrespondenceRejectorSampleConsensus2D";
+ // Put the projection matrix together
+ // projection_matrix_ (0, 0) = 525.f;
+ // projection_matrix_ (1, 1) = 525.f;
+ // projection_matrix_ (0, 2) = 320.f;
+ // projection_matrix_ (1, 2) = 240.f;
+ }
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences. \param[in] original_correspondences the set of initial
+ * correspondences given \param[out] remaining_correspondences the resultant filtered
+ * set of remaining correspondences
+ */
+ inline void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences);
+
+ /** \brief Sets the focal length parameters of the target camera.
+ * \param[in] fx the focal length in pixels along the x-axis of the image
+ * \param[in] fy the focal length in pixels along the y-axis of the image
+ */
+ inline void
+ setFocalLengths(const float fx, const float fy)
+ {
+ projection_matrix_(0, 0) = fx;
+ projection_matrix_(1, 1) = fy;
+ }
- /** \brief Sets the camera center parameters of the target camera.
- * \param[in] cx the x-coordinate of the camera center
- * \param[in] cy the y-coordinate of the camera center
- */
- inline void
- setCameraCenters (const float cx, const float cy)
- {
- projection_matrix_ (0, 2) = cx;
- projection_matrix_ (1, 2) = cy;
- }
+ /** \brief Reads back the focal length parameters of the target camera.
+ * \param[out] fx the focal length in pixels along the x-axis of the image
+ * \param[out] fy the focal length in pixels along the y-axis of the image
+ */
+ inline void
+ getFocalLengths(float& fx, float& fy) const
+ {
+ fx = projection_matrix_(0, 0);
+ fy = projection_matrix_(1, 1);
+ }
- /** \brief Reads back the camera center parameters of the target camera.
- * \param[out] cx the x-coordinate of the camera center
- * \param[out] cy the y-coordinate of the camera center
- */
- inline void
- getCameraCenters (float &cx, float &cy) const
- {
- cx = projection_matrix_ (0, 2);
- cy = projection_matrix_ (1, 2);
- }
+ /** \brief Sets the camera center parameters of the target camera.
+ * \param[in] cx the x-coordinate of the camera center
+ * \param[in] cy the y-coordinate of the camera center
+ */
+ inline void
+ setCameraCenters(const float cx, const float cy)
+ {
+ projection_matrix_(0, 2) = cx;
+ projection_matrix_(1, 2) = cy;
+ }
- protected:
+ /** \brief Reads back the camera center parameters of the target camera.
+ * \param[out] cx the x-coordinate of the camera center
+ * \param[out] cy the y-coordinate of the camera center
+ */
+ inline void
+ getCameraCenters(float& cx, float& cy) const
+ {
+ cx = projection_matrix_(0, 2);
+ cy = projection_matrix_(1, 2);
+ }
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences)
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences)
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
+ }
- /** \brief Camera projection matrix. */
- Eigen::Matrix3f projection_matrix_;
+ /** \brief Camera projection matrix. */
+ Eigen::Matrix3f projection_matrix_;
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
- }
-}
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_rejection_sample_consensus_2d.hpp>
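Similarly, a brief sketch for the pixel-based variant above; the clouds, correspondences and camera intrinsics are placeholders (the intrinsics mirror the commented-out defaults in the constructor):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/correspondence_rejection_sample_consensus_2d.h>

void
rejectInImagePlane(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                   const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target,
                   const pcl::Correspondences& initial,
                   pcl::Correspondences& remaining)
{
  pcl::registration::CorrespondenceRejectorSampleConsensus2D<pcl::PointXYZ> rejector;
  rejector.setInputSource(source);
  rejector.setInputTarget(target);
  rejector.setFocalLengths(525.f, 525.f);  // fx, fy in pixels
  rejector.setCameraCenters(320.f, 240.f); // cx, cy in pixels
  rejector.getRemainingCorrespondences(initial, remaining);
}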
#pragma once
#include <pcl/registration/correspondence_rejection.h>
-#include <pcl/memory.h> // for static_pointer_cast
+#include <pcl/conversions.h> // for fromPCLPointCloud2
+#include <pcl/memory.h> // for static_pointer_cast
#include <pcl/point_cloud.h>
+namespace pcl {
+namespace registration {
+/**
+ * @b CorrespondenceRejectorSurfaceNormal implements a simple correspondence
+ * rejection method based on the angle between the normals at corresponding points.
+ *
+ * \note If \ref setInputSource and \ref setInputTarget are given, then the
+ * distances between correspondences will be estimated using the given XYZ
+ * data, and not read from the set of input correspondences.
+ *
+ * \author Aravindhan K Krishnan (original code from libpointmatcher:
+ * https://github.com/ethz-asl/libpointmatcher) \ingroup registration
+ */
+class PCL_EXPORTS CorrespondenceRejectorSurfaceNormal : public CorrespondenceRejector {
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
-namespace pcl
-{
- namespace registration
- {
- /**
- * @b CorrespondenceRejectorSurfaceNormal implements a simple correspondence
- * rejection method based on the angle between the normals at correspondent points.
- *
- * \note If \ref setInputCloud and \ref setInputTarget are given, then the
- * distances between correspondences will be estimated using the given XYZ
- * data, and not read from the set of input correspondences.
- *
- * \author Aravindhan K Krishnan (original code from libpointmatcher: https://github.com/ethz-asl/libpointmatcher)
- * \ingroup registration
- */
- class PCL_EXPORTS CorrespondenceRejectorSurfaceNormal: public CorrespondenceRejector
- {
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- public:
- using Ptr = shared_ptr<CorrespondenceRejectorSurfaceNormal>;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorSurfaceNormal>;
-
- /** \brief Empty constructor. Sets the threshold to 1.0. */
- CorrespondenceRejectorSurfaceNormal ()
- : threshold_ (1.0)
- {
- rejection_name_ = "CorrespondenceRejectorSurfaceNormal";
- }
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
+public:
+ using Ptr = shared_ptr<CorrespondenceRejectorSurfaceNormal>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorSurfaceNormal>;
- /** \brief Set the thresholding angle between the normals for correspondence rejection.
- * \param[in] threshold cosine of the thresholding angle between the normals for rejection
- */
- inline void
- setThreshold (double threshold) { threshold_ = threshold; };
+ /** \brief Empty constructor. Sets the threshold to 1.0. */
+ CorrespondenceRejectorSurfaceNormal() : threshold_(1.0)
+ {
+ rejection_name_ = "CorrespondenceRejectorSurfaceNormal";
+ }
- /** \brief Get the thresholding angle between the normals for correspondence rejection. */
- inline double
- getThreshold () const { return threshold_; };
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences. \param[in] original_correspondences the set of initial
+ * correspondences given \param[out] remaining_correspondences the resultant filtered
+ * set of remaining correspondences
+ */
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
- /** \brief Initialize the data container object for the point type and the normal type. */
- template <typename PointT, typename NormalT> inline void
- initializeDataContainer ()
- {
- data_container_.reset (new DataContainer<PointT, NormalT>);
- }
+ /** \brief Set the thresholding angle between the normals for correspondence
+ * rejection. \param[in] threshold cosine of the thresholding angle between the
+ * normals for rejection
+ */
+ inline void
+ setThreshold(double threshold)
+ {
+ threshold_ = threshold;
+ };
- /** \brief Provide a source point cloud dataset (must contain XYZ data!), used to compute the correspondence distance.
- * \param[in] input a cloud containing XYZ data
- */
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "pcl::registration::CorrespondenceRejectorSurfaceNormal::setInputCloud is deprecated. Please use setInputSource instead")
- inline void
- setInputCloud (const typename pcl::PointCloud<PointT>::ConstPtr &input)
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::setInputCloud] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputSource (input);
- }
+ /** \brief Get the thresholding angle between the normals for correspondence
+ * rejection. */
+ inline double
+ getThreshold() const
+ {
+ return threshold_;
+ };
- /** \brief Provide a source point cloud dataset (must contain XYZ data!), used to compute the correspondence distance.
- * \param[in] input a cloud containing XYZ data
- */
- template <typename PointT> inline void
- setInputSource (const typename pcl::PointCloud<PointT>::ConstPtr &input)
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::setInputCloud] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputSource (input);
- }
+ /** \brief Initialize the data container object for the point type and the normal
+ * type. */
+ template <typename PointT, typename NormalT>
+ inline void
+ initializeDataContainer()
+ {
+ data_container_.reset(new DataContainer<PointT, NormalT>);
+ }
- /** \brief Get the target input point cloud */
- template <typename PointT> inline typename pcl::PointCloud<PointT>::ConstPtr
- getInputSource () const
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::getInputSource] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- return (static_pointer_cast<DataContainer<PointT> > (data_container_)->getInputSource ());
- }
+ /** \brief Provide a source point cloud dataset (must contain XYZ data!), used to
+ * compute the correspondence distance. \param[in] input a cloud containing XYZ data
+ */
+ template <typename PointT>
+ inline void
+ setInputSource(const typename pcl::PointCloud<PointT>::ConstPtr& input)
+ {
+ if (!data_container_) {
+ PCL_ERROR(
+        "[pcl::registration::%s::setInputSource] Initialize the data container object "
+        "by calling initializeDataContainer () before using this function.\n",
+ getClassName().c_str());
+ return;
+ }
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->setInputSource(input);
+ }
- /** \brief Provide a target point cloud dataset (must contain XYZ data!), used to compute the correspondence distance.
- * \param[in] target a cloud containing XYZ data
- */
- template <typename PointT> inline void
- setInputTarget (const typename pcl::PointCloud<PointT>::ConstPtr &target)
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::setInputTarget] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputTarget (target);
- }
+ /** \brief Get the target input point cloud */
+ template <typename PointT>
+ inline typename pcl::PointCloud<PointT>::ConstPtr
+ getInputSource() const
+ {
+ if (!data_container_) {
+ PCL_ERROR(
+ "[pcl::registration::%s::getInputSource] Initialize the data container "
+        "object by calling initializeDataContainer () before using this function.\n",
+        getClassName().c_str());
+      return (nullptr);
+ }
+ return (
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->getInputSource());
+ }
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the target cloud.
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputTarget. Only use if you are
- * confident that the tree will be set correctly.
- */
- template <typename PointT> inline void
- setSearchMethodTarget (const typename pcl::search::KdTree<PointT>::Ptr &tree,
- bool force_no_recompute = false)
- {
- static_pointer_cast< DataContainer<PointT> >
- (data_container_)->setSearchMethodTarget (tree, force_no_recompute );
- }
+ /** \brief Provide a target point cloud dataset (must contain XYZ data!), used to
+ * compute the correspondence distance. \param[in] target a cloud containing XYZ data
+ */
+ template <typename PointT>
+ inline void
+ setInputTarget(const typename pcl::PointCloud<PointT>::ConstPtr& target)
+ {
+ if (!data_container_) {
+ PCL_ERROR(
+ "[pcl::registration::%s::setInputTarget] Initialize the data container "
+        "object by calling initializeDataContainer () before using this function.\n",
+ getClassName().c_str());
+ return;
+ }
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->setInputTarget(target);
+ }
- /** \brief Get the target input point cloud */
- template <typename PointT> inline typename pcl::PointCloud<PointT>::ConstPtr
- getInputTarget () const
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::getInputTarget] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- return (static_pointer_cast<DataContainer<PointT> > (data_container_)->getInputTarget ());
- }
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the target cloud.
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputTarget. Only use if you are
+ * confident that the tree will be set correctly.
+ */
+ template <typename PointT>
+ inline void
+ setSearchMethodTarget(const typename pcl::search::KdTree<PointT>::Ptr& tree,
+ bool force_no_recompute = false)
+ {
+ static_pointer_cast<DataContainer<PointT>>(data_container_)
+ ->setSearchMethodTarget(tree, force_no_recompute);
+ }
- /** \brief Set the normals computed on the input point cloud
- * \param[in] normals the normals computed for the input cloud
- */
- template <typename PointT, typename NormalT> inline void
- setInputNormals (const typename pcl::PointCloud<NormalT>::ConstPtr &normals)
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::setInputNormals] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- static_pointer_cast<DataContainer<PointT, NormalT> > (data_container_)->setInputNormals (normals);
- }
+ /** \brief Get the target input point cloud */
+ template <typename PointT>
+ inline typename pcl::PointCloud<PointT>::ConstPtr
+ getInputTarget() const
+ {
+ if (!data_container_) {
+ PCL_ERROR(
+ "[pcl::registration::%s::getInputTarget] Initialize the data container "
+        "object by calling initializeDataContainer () before using this function.\n",
+        getClassName().c_str());
+      return (nullptr);
+ }
+ return (
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->getInputTarget());
+ }
- /** \brief Get the normals computed on the input point cloud */
- template <typename NormalT> inline typename pcl::PointCloud<NormalT>::Ptr
- getInputNormals () const
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::getInputNormals] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- return (static_pointer_cast<DataContainer<pcl::PointXYZ, NormalT> > (data_container_)->getInputNormals ());
- }
+ /** \brief Set the normals computed on the input point cloud
+ * \param[in] normals the normals computed for the input cloud
+ */
+ template <typename PointT, typename NormalT>
+ inline void
+ setInputNormals(const typename pcl::PointCloud<NormalT>::ConstPtr& normals)
+ {
+ if (!data_container_) {
+ PCL_ERROR(
+ "[pcl::registration::%s::setInputNormals] Initialize the data container "
+        "object by calling initializeDataContainer () before using this function.\n",
+ getClassName().c_str());
+ return;
+ }
+ static_pointer_cast<DataContainer<PointT, NormalT>>(data_container_)
+ ->setInputNormals(normals);
+ }
- /** \brief Set the normals computed on the target point cloud
- * \param[in] normals the normals computed for the input cloud
- */
- template <typename PointT, typename NormalT> inline void
- setTargetNormals (const typename pcl::PointCloud<NormalT>::ConstPtr &normals)
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::setTargetNormals] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- static_pointer_cast<DataContainer<PointT, NormalT> > (data_container_)->setTargetNormals (normals);
- }
+ /** \brief Get the normals computed on the input point cloud */
+ template <typename NormalT>
+ inline typename pcl::PointCloud<NormalT>::Ptr
+ getInputNormals() const
+ {
+ if (!data_container_) {
+ PCL_ERROR(
+ "[pcl::registration::%s::getInputNormals] Initialize the data container "
+        "object by calling initializeDataContainer () before using this function.\n",
+        getClassName().c_str());
+      return (nullptr);
+ }
+ return (static_pointer_cast<DataContainer<pcl::PointXYZ, NormalT>>(data_container_)
+ ->getInputNormals());
+ }
- /** \brief Get the normals computed on the target point cloud */
- template <typename NormalT> inline typename pcl::PointCloud<NormalT>::Ptr
- getTargetNormals () const
- {
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registration::%s::getTargetNormals] Initialize the data container object by calling intializeDataContainer () before using this function.\n", getClassName ().c_str ());
- return;
- }
- return (static_pointer_cast<DataContainer<pcl::PointXYZ, NormalT> > (data_container_)->getTargetNormals ());
- }
+ /** \brief Set the normals computed on the target point cloud
+ * \param[in] normals the normals computed for the input cloud
+ */
+ template <typename PointT, typename NormalT>
+ inline void
+ setTargetNormals(const typename pcl::PointCloud<NormalT>::ConstPtr& normals)
+ {
+ if (!data_container_) {
+ PCL_ERROR(
+ "[pcl::registration::%s::setTargetNormals] Initialize the data container "
+        "object by calling initializeDataContainer () before using this function.\n",
+ getClassName().c_str());
+ return;
+ }
+ static_pointer_cast<DataContainer<PointT, NormalT>>(data_container_)
+ ->setTargetNormals(normals);
+ }
+ /** \brief Get the normals computed on the target point cloud */
+ template <typename NormalT>
+ inline typename pcl::PointCloud<NormalT>::Ptr
+ getTargetNormals() const
+ {
+ if (!data_container_) {
+ PCL_ERROR(
+ "[pcl::registration::%s::getTargetNormals] Initialize the data container "
+        "object by calling initializeDataContainer () before using this function.\n",
+        getClassName().c_str());
+      return (nullptr);
+ }
+ return (static_pointer_cast<DataContainer<pcl::PointXYZ, NormalT>>(data_container_)
+ ->getTargetNormals());
+ }
- /** \brief See if this rejector requires source points */
- bool
- requiresSourcePoints () const override
- { return (true); }
+ /** \brief See if this rejector requires source points */
+ bool
+ requiresSourcePoints() const override
+ {
+ return (true);
+ }
- /** \brief Blob method for setting the source cloud */
- void
- setSourcePoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- if (!data_container_)
- initializeDataContainer<PointXYZ, Normal> ();
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputSource<PointXYZ> (cloud);
- }
-
- /** \brief See if this rejector requires a target cloud */
- bool
- requiresTargetPoints () const override
- { return (true); }
+ /** \brief Blob method for setting the source cloud */
+ void
+ setSourcePoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ if (!data_container_)
+ initializeDataContainer<PointXYZ, Normal>();
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputSource<PointXYZ>(cloud);
+ }
- /** \brief Method for setting the target cloud */
- void
- setTargetPoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- if (!data_container_)
- initializeDataContainer<PointXYZ, Normal> ();
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputTarget<PointXYZ> (cloud);
- }
-
- /** \brief See if this rejector requires source normals */
- bool
- requiresSourceNormals () const override
- { return (true); }
+ /** \brief See if this rejector requires a target cloud */
+ bool
+ requiresTargetPoints() const override
+ {
+ return (true);
+ }
- /** \brief Blob method for setting the source normals */
- void
- setSourceNormals (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- if (!data_container_)
- initializeDataContainer<PointXYZ, Normal> ();
- PointCloud<Normal>::Ptr cloud (new PointCloud<Normal>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputNormals<PointXYZ, Normal> (cloud);
- }
-
- /** \brief See if this rejector requires target normals*/
- bool
- requiresTargetNormals () const override
- { return (true); }
+ /** \brief Method for setting the target cloud */
+ void
+ setTargetPoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ if (!data_container_)
+ initializeDataContainer<PointXYZ, Normal>();
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputTarget<PointXYZ>(cloud);
+ }
- /** \brief Method for setting the target normals */
- void
- setTargetNormals (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- if (!data_container_)
- initializeDataContainer<PointXYZ, Normal> ();
- PointCloud<Normal>::Ptr cloud (new PointCloud<Normal>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setTargetNormals<PointXYZ, Normal> (cloud);
- }
+ /** \brief See if this rejector requires source normals */
+ bool
+ requiresSourceNormals() const override
+ {
+ return (true);
+ }
- protected:
+ /** \brief Blob method for setting the source normals */
+ void
+ setSourceNormals(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ if (!data_container_)
+ initializeDataContainer<PointXYZ, Normal>();
+ PointCloud<Normal>::Ptr cloud(new PointCloud<Normal>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputNormals<PointXYZ, Normal>(cloud);
+ }
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
+ /** \brief See if this rejector requires target normals*/
+ bool
+ requiresTargetNormals() const override
+ {
+ return (true);
+ }
- /** \brief The median distance threshold between two correspondent points in source <-> target. */
- double threshold_;
+ /** \brief Method for setting the target normals */
+ void
+ setTargetNormals(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ if (!data_container_)
+ initializeDataContainer<PointXYZ, Normal>();
+ PointCloud<Normal>::Ptr cloud(new PointCloud<Normal>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setTargetNormals<PointXYZ, Normal>(cloud);
+ }
- using DataContainerPtr = DataContainerInterface::Ptr;
- /** \brief A pointer to the DataContainer object containing the input and target point clouds */
- DataContainerPtr data_container_;
- };
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
}
-}
-#include <pcl/registration/impl/correspondence_rejection_surface_normal.hpp>
+  /** \brief The cosine of the thresholding angle between the normals of corresponding
+   * points in source <-> target. */
+ double threshold_;
+
+ using DataContainerPtr = DataContainerInterface::Ptr;
+ /** \brief A pointer to the DataContainer object containing the input and target point
+ * clouds */
+ DataContainerPtr data_container_;
+};
+} // namespace registration
+} // namespace pcl
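A sketch of how the normal-based rejector above is wired up; the clouds, normals and correspondences are hypothetical, and the data container must be initialized before the typed setters are called:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/correspondence_rejection_surface_normal.h>

void
rejectByNormalAngle(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                    const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target,
                    const pcl::PointCloud<pcl::Normal>::ConstPtr& source_normals,
                    const pcl::PointCloud<pcl::Normal>::ConstPtr& target_normals,
                    const pcl::Correspondences& initial,
                    pcl::Correspondences& remaining)
{
  pcl::registration::CorrespondenceRejectorSurfaceNormal rejector;
  rejector.initializeDataContainer<pcl::PointXYZ, pcl::Normal>();
  rejector.setInputSource<pcl::PointXYZ>(source);
  rejector.setInputTarget<pcl::PointXYZ>(target);
  rejector.setInputNormals<pcl::PointXYZ, pcl::Normal>(source_normals);
  rejector.setTargetNormals<pcl::PointXYZ, pcl::Normal>(target_normals);
  rejector.setThreshold(0.707); // cosine of the thresholding angle, here ~45 degrees
  rejector.getRemainingCorrespondences(initial, remaining);
}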
#include <pcl/registration/correspondence_rejection.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief CorrespondenceRejectorTrimmed implements a correspondence
- * rejection for ICP-like registration algorithms that uses only the best
- * 'k' correspondences where 'k' is some estimate of the overlap between
- * the two point clouds being registered.
- *
- * Reference:
- * 'The Trimmed Iterative Closest Point Algorithm' by
- * D. Chetverikov, D. Svirko, D. Stepanov, and Pavel Krsek.
- * In Proceedings of the 16th International Conference on Pattern
- * Recognition (ICPR 2002).
- *
- * \author Dirk Holz
- * \ingroup registration
- */
- class PCL_EXPORTS CorrespondenceRejectorTrimmed: public CorrespondenceRejector
- {
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- public:
- using Ptr = shared_ptr<CorrespondenceRejectorTrimmed>;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorTrimmed>;
-
- /** \brief Empty constructor. */
- CorrespondenceRejectorTrimmed () :
- overlap_ratio_ (0.5f),
- nr_min_correspondences_ (0)
- {
- rejection_name_ = "CorrespondenceRejectorTrimmed";
- }
-
- /** \brief Destructor. */
- ~CorrespondenceRejectorTrimmed () {}
-
- /** \brief Set the expected ratio of overlap between point clouds (in
- * terms of correspondences).
- * \param[in] ratio ratio of overlap between 0 (no overlap, no
- * correspondences) and 1 (full overlap, all correspondences)
- */
- virtual inline void
- setOverlapRatio (float ratio) { overlap_ratio_ = std::min (1.0f, std::max (0.0f, ratio)); };
-
- /** \brief Get the maximum distance used for thresholding in correspondence rejection. */
- inline float
- getOverlapRatio () const { return overlap_ratio_; };
-
- /** \brief Set a minimum number of correspondences. If the specified overlap ratio causes to have
- * less correspondences, \a CorrespondenceRejectorTrimmed will try to return at least
- * \a nr_min_correspondences_ correspondences (or all correspondences in case \a nr_min_correspondences_
- * is less than the number of given correspondences).
- * \param[in] min_correspondences the minimum number of correspondences
- */
- inline void
- setMinCorrespondences (unsigned int min_correspondences) { nr_min_correspondences_ = min_correspondences; };
+namespace pcl {
+namespace registration {
+/** \brief CorrespondenceRejectorTrimmed implements a correspondence
+ * rejection for ICP-like registration algorithms that uses only the best
+ * 'k' correspondences where 'k' is some estimate of the overlap between
+ * the two point clouds being registered.
+ *
+ * Reference:
+ * 'The Trimmed Iterative Closest Point Algorithm' by
+ * D. Chetverikov, D. Svirko, D. Stepanov, and Pavel Krsek.
+ * In Proceedings of the 16th International Conference on Pattern
+ * Recognition (ICPR 2002).
+ *
+ * \author Dirk Holz
+ * \ingroup registration
+ */
+class PCL_EXPORTS CorrespondenceRejectorTrimmed : public CorrespondenceRejector {
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
- /** \brief Get the minimum number of correspondences. */
- inline unsigned int
- getMinCorrespondences () const { return nr_min_correspondences_; };
+public:
+ using Ptr = shared_ptr<CorrespondenceRejectorTrimmed>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorTrimmed>;
+ /** \brief Empty constructor. */
+ CorrespondenceRejectorTrimmed() : overlap_ratio_(0.5f), nr_min_correspondences_(0)
+ {
+ rejection_name_ = "CorrespondenceRejectorTrimmed";
+ }
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
+ /** \brief Destructor. */
+ ~CorrespondenceRejectorTrimmed() {}
- protected:
+ /** \brief Set the expected ratio of overlap between point clouds (in
+ * terms of correspondences).
+ * \param[in] ratio ratio of overlap between 0 (no overlap, no
+ * correspondences) and 1 (full overlap, all correspondences)
+ */
+ virtual inline void
+ setOverlapRatio(float ratio)
+ {
+ overlap_ratio_ = std::min(1.0f, std::max(0.0f, ratio));
+ };
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
+  /** \brief Get the expected ratio of overlap between point clouds (in terms of
+   * correspondences). */
+ inline float
+ getOverlapRatio() const
+ {
+ return overlap_ratio_;
+ };
+
+  /** \brief Set a minimum number of correspondences. If the specified overlap ratio
+   * would yield fewer correspondences, \a CorrespondenceRejectorTrimmed will try to
+   * return at least \a nr_min_correspondences_ correspondences (or all given
+   * correspondences in case \a nr_min_correspondences_ exceeds their number).
+   * \param[in] min_correspondences the minimum number of correspondences
+   */
+ inline void
+ setMinCorrespondences(unsigned int min_correspondences)
+ {
+ nr_min_correspondences_ = min_correspondences;
+ };
- /** Overlap Ratio in [0..1] */
- float overlap_ratio_;
+ /** \brief Get the minimum number of correspondences. */
+ inline unsigned int
+ getMinCorrespondences() const
+ {
+ return nr_min_correspondences_;
+ };
+
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences. \param[in] original_correspondences the set of initial
+ * correspondences given \param[out] remaining_correspondences the resultant filtered
+ * set of remaining correspondences
+ */
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
+ {
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
+ }
- /** Minimum number of correspondences. */
- unsigned int nr_min_correspondences_;
- };
+ /** Overlap Ratio in [0..1] */
+ float overlap_ratio_;
- }
-}
+ /** Minimum number of correspondences. */
+ unsigned int nr_min_correspondences_;
+};
-#include <pcl/registration/impl/correspondence_rejection_trimmed.hpp>
+} // namespace registration
+} // namespace pcl
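And a short sketch for the trimmed rejector above, which needs no point data; `initial` and the chosen ratio are placeholders:

#include <pcl/registration/correspondence_rejection_trimmed.h>

void
trimCorrespondences(const pcl::Correspondences& initial,
                    pcl::Correspondences& remaining)
{
  pcl::registration::CorrespondenceRejectorTrimmed rejector;
  rejector.setOverlapRatio(0.4f);     // expected overlap between the two clouds
  rejector.setMinCorrespondences(20); // but never fewer than 20, if that many exist
  rejector.getRemainingCorrespondences(initial, remaining);
}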
#pragma once
#include <pcl/registration/correspondence_rejection.h>
-#include <pcl/memory.h> // for static_pointer_cast
+#include <pcl/conversions.h> // for fromPCLPointCloud2
+#include <pcl/memory.h> // for static_pointer_cast
#include <pcl/point_cloud.h>
#include <vector>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/**
+ * @b CorrespondenceRejectoVarTrimmed implements a simple correspondence
+ * rejection method by considering as inliers a certain percentage of correspondences
+ * with the least distances. The percentage of inliers is computed internally as
+ * mentioned in the paper 'Outlier Robust ICP for minimizing Fractional RMSD, J. M.
+ * Philips et al'
+ *
+ * \note If \ref setInputCloud and \ref setInputTarget are given, then the
+ * distances between correspondences will be estimated using the given XYZ
+ * data, and not read from the set of input correspondences.
+ *
+ * \author Aravindhan K Krishnan. This code is ported from libpointmatcher
+ * (https://github.com/ethz-asl/libpointmatcher)
+ * \ingroup registration
+ */
+class PCL_EXPORTS CorrespondenceRejectorVarTrimmed : public CorrespondenceRejector {
+ using CorrespondenceRejector::getClassName;
+ using CorrespondenceRejector::input_correspondences_;
+ using CorrespondenceRejector::rejection_name_;
+
+public:
+ using Ptr = shared_ptr<CorrespondenceRejectorVarTrimmed>;
+ using ConstPtr = shared_ptr<const CorrespondenceRejectorVarTrimmed>;
+
+ /** \brief Empty constructor. */
+ CorrespondenceRejectorVarTrimmed()
+ : trimmed_distance_(0), factor_(), min_ratio_(0.05), max_ratio_(0.95), lambda_(0.95)
+ {
+ rejection_name_ = "CorrespondenceRejectorVarTrimmed";
+ }
+
+ /** \brief Get a list of valid correspondences after rejection from the original set
+ * of correspondences.
+ * \param[in] original_correspondences the set of initial correspondences given
+ * \param[out] remaining_correspondences the resultant filtered set of remaining
+ * correspondences
+ */
+ void
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences) override;
+
+ /** \brief Get the trimmed distance used for thresholding in correspondence rejection.
+ */
+ inline double
+ getTrimmedDistance() const
+ {
+ return trimmed_distance_;
+ };
+
+ /** \brief Provide a source point cloud dataset (must contain XYZ
+ * data!), used to compute the correspondence distance.
+ * \param[in] cloud a cloud containing XYZ data
+ */
+ template <typename PointT>
+ inline void
+ setInputSource(const typename pcl::PointCloud<PointT>::ConstPtr& cloud)
+ {
+ if (!data_container_)
+ data_container_.reset(new DataContainer<PointT>);
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->setInputSource(cloud);
+ }
+
+ /** \brief Provide a target point cloud dataset (must contain XYZ
+ * data!), used to compute the correspondence distance.
+ * \param[in] target a cloud containing XYZ data
+ */
+ template <typename PointT>
+ inline void
+ setInputTarget(const typename pcl::PointCloud<PointT>::ConstPtr& target)
+ {
+ if (!data_container_)
+ data_container_.reset(new DataContainer<PointT>);
+ static_pointer_cast<DataContainer<PointT>>(data_container_)->setInputTarget(target);
+ }
+
+ /** \brief See if this rejector requires source points */
+ bool
+ requiresSourcePoints() const override
+ {
+ return (true);
+ }
+
+ /** \brief Blob method for setting the source cloud */
+ void
+ setSourcePoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputSource<PointXYZ>(cloud);
+ }
+
+ /** \brief See if this rejector requires a target cloud */
+ bool
+ requiresTargetPoints() const override
+ {
+ return (true);
+ }
+
+ /** \brief Method for setting the target cloud */
+ void
+ setTargetPoints(pcl::PCLPointCloud2::ConstPtr cloud2) override
+ {
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ fromPCLPointCloud2(*cloud2, *cloud);
+ setInputTarget<PointXYZ>(cloud);
+ }
+
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the target cloud.
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputTarget. Only use if you are
+ * confident that the tree will be set correctly.
+ */
+ template <typename PointT>
+ inline void
+ setSearchMethodTarget(const typename pcl::search::KdTree<PointT>::Ptr& tree,
+ bool force_no_recompute = false)
+ {
+ static_pointer_cast<DataContainer<PointT>>(data_container_)
+ ->setSearchMethodTarget(tree, force_no_recompute);
+ }
+
+ /** \brief Get the computed inlier ratio used for thresholding in correspondence
+ * rejection. */
+ inline double
+ getTrimFactor() const
+ {
+ return factor_;
+ }
+
+ /** \brief Set the minimum overlap ratio
+ * \param[in] ratio the overlap ratio [0..1]
+ */
+ inline void
+ setMinRatio(double ratio)
+ {
+ min_ratio_ = ratio;
+ }
+
+ /** \brief Get the minimum overlap ratio
+ */
+ inline double
+ getMinRatio() const
+ {
+ return min_ratio_;
+ }
+
+ /** \brief Set the maximum overlap ratio
+ * \param[in] ratio the overlap ratio [0..1]
+ */
+ inline void
+ setMaxRatio(double ratio)
+ {
+ max_ratio_ = ratio;
+ }
+
+ /** \brief Get the maximum overlap ratio
+ */
+ inline double
+ getMaxRatio() const
+ {
+ return max_ratio_;
+ }
+
+protected:
+ /** \brief Apply the rejection algorithm.
+ * \param[out] correspondences the set of resultant correspondences.
+ */
+ inline void
+ applyRejection(pcl::Correspondences& correspondences) override
{
- /**
- * @b CorrespondenceRejectoVarTrimmed implements a simple correspondence
- * rejection method by considering as inliers a certain percentage of correspondences
- * with the least distances. The percentage of inliers is computed internally as mentioned
- * in the paper 'Outlier Robust ICP for minimizing Fractional RMSD, J. M. Philips et al'
- *
- * \note If \ref setInputCloud and \ref setInputTarget are given, then the
- * distances between correspondences will be estimated using the given XYZ
- * data, and not read from the set of input correspondences.
- *
- * \author Aravindhan K Krishnan. This code is ported from libpointmatcher (https://github.com/ethz-asl/libpointmatcher)
- * \ingroup registration
- */
- class PCL_EXPORTS CorrespondenceRejectorVarTrimmed: public CorrespondenceRejector
- {
- using CorrespondenceRejector::input_correspondences_;
- using CorrespondenceRejector::rejection_name_;
- using CorrespondenceRejector::getClassName;
-
- public:
- using Ptr = shared_ptr<CorrespondenceRejectorVarTrimmed>;
- using ConstPtr = shared_ptr<const CorrespondenceRejectorVarTrimmed>;
-
- /** \brief Empty constructor. */
- CorrespondenceRejectorVarTrimmed () :
- trimmed_distance_ (0),
- factor_ (),
- min_ratio_ (0.05),
- max_ratio_ (0.95),
- lambda_ (0.95)
- {
- rejection_name_ = "CorrespondenceRejectorVarTrimmed";
- }
-
- /** \brief Get a list of valid correspondences after rejection from the original set of correspondences.
- * \param[in] original_correspondences the set of initial correspondences given
- * \param[out] remaining_correspondences the resultant filtered set of remaining correspondences
- */
- void
- getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences) override;
-
- /** \brief Get the trimmed distance used for thresholding in correspondence rejection. */
- inline double
- getTrimmedDistance () const { return trimmed_distance_; };
-
- /** \brief Provide a source point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- template <typename PointT> inline void
- setInputSource (const typename pcl::PointCloud<PointT>::ConstPtr &cloud)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputSource (cloud);
- }
-
- /** \brief Provide a source point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "pcl::registration::CorrespondenceRejectorVarTrimmed::setInputCloud is deprecated. Please use setInputSource instead")
- inline void
- setInputCloud (const typename pcl::PointCloud<PointT>::ConstPtr &cloud)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputSource (cloud);
- }
-
- /** \brief Provide a target point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] target a cloud containing XYZ data
- */
- template <typename PointT> inline void
- setInputTarget (const typename pcl::PointCloud<PointT>::ConstPtr &target)
- {
- if (!data_container_)
- data_container_.reset (new DataContainer<PointT>);
- static_pointer_cast<DataContainer<PointT> > (data_container_)->setInputTarget (target);
- }
-
-
-
- /** \brief See if this rejector requires source points */
- bool
- requiresSourcePoints () const override
- { return (true); }
-
- /** \brief Blob method for setting the source cloud */
- void
- setSourcePoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputSource<PointXYZ> (cloud);
- }
-
- /** \brief See if this rejector requires a target cloud */
- bool
- requiresTargetPoints () const override
- { return (true); }
-
- /** \brief Method for setting the target cloud */
- void
- setTargetPoints (pcl::PCLPointCloud2::ConstPtr cloud2) override
- {
- PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- fromPCLPointCloud2 (*cloud2, *cloud);
- setInputTarget<PointXYZ> (cloud);
- }
-
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the target cloud.
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputTarget. Only use if you are
- * confident that the tree will be set correctly.
- */
- template <typename PointT> inline void
- setSearchMethodTarget (const typename pcl::search::KdTree<PointT>::Ptr &tree,
- bool force_no_recompute = false)
- {
- static_pointer_cast< DataContainer<PointT> >
- (data_container_)->setSearchMethodTarget (tree, force_no_recompute );
- }
-
- /** \brief Get the computed inlier ratio used for thresholding in correspondence rejection. */
- inline double
- getTrimFactor () const { return factor_; }
-
- /** brief set the minimum overlap ratio
- * \param[in] ratio the overlap ratio [0..1]
- */
- inline void
- setMinRatio (double ratio) { min_ratio_ = ratio; }
-
- /** brief get the minimum overlap ratio
- */
- inline double
- getMinRatio () const { return min_ratio_; }
-
- /** brief set the maximum overlap ratio
- * \param[in] ratio the overlap ratio [0..1]
- */
- inline void
- setMaxRatio (double ratio) { max_ratio_ = ratio; }
-
- /** brief get the maximum overlap ratio
- */
- inline double
- getMaxRatio () const { return max_ratio_; }
-
- protected:
-
- /** \brief Apply the rejection algorithm.
- * \param[out] correspondences the set of resultant correspondences.
- */
- inline void
- applyRejection (pcl::Correspondences &correspondences) override
- {
- getRemainingCorrespondences (*input_correspondences_, correspondences);
- }
-
- /** \brief The inlier distance threshold (based on the computed trim factor) between two correspondent points in source <-> target.
- */
- double trimmed_distance_;
-
- /** \brief The factor for correspondence rejection. Only factor times the total points sorted based on
- * the correspondence distances will be considered as inliers. Remaining points are rejected. This factor is
- * computed internally
- */
- double factor_;
-
- /** \brief The minimum overlap ratio between the input and target clouds
- */
- double min_ratio_;
-
- /** \brief The maximum overlap ratio between the input and target clouds
- */
- double max_ratio_;
-
- /** \brief part of the term that balances the root mean square difference. This is an internal parameter
- */
- double lambda_;
-
- using DataContainerPtr = DataContainerInterface::Ptr;
-
- /** \brief A pointer to the DataContainer object containing the input and target point clouds */
- DataContainerPtr data_container_;
-
- private:
-
- /** \brief finds the optimal inlier ratio. This is based on the paper 'Outlier Robust ICP for minimizing Fractional RMSD, J. M. Philips et al'
- */
- inline float optimizeInlierRatio (std::vector <double> &dists) const;
- };
+ getRemainingCorrespondences(*input_correspondences_, correspondences);
}
-}
-#include <pcl/registration/impl/correspondence_rejection_var_trimmed.hpp>
+ /** \brief The inlier distance threshold (based on the computed trim factor) between
+ * two correspondent points in source <-> target.
+ */
+ double trimmed_distance_;
+
+ /** \brief The factor for correspondence rejection. Only factor times the total points
+ * sorted based on the correspondence distances will be considered as inliers.
+ * Remaining points are rejected. This factor is computed internally
+ */
+ double factor_;
+
+ /** \brief The minimum overlap ratio between the input and target clouds
+ */
+ double min_ratio_;
+
+ /** \brief The maximum overlap ratio between the input and target clouds
+ */
+ double max_ratio_;
+
+ /** \brief part of the term that balances the root mean square difference. This is an
+ * internal parameter
+ */
+ double lambda_;
+
+ using DataContainerPtr = DataContainerInterface::Ptr;
+
+ /** \brief A pointer to the DataContainer object containing the input and target point
+ * clouds */
+ DataContainerPtr data_container_;
+
+private:
+ /** \brief finds the optimal inlier ratio. This is based on the paper 'Outlier Robust
+ * ICP for minimizing Fractional RMSD, J. M. Philips et al'
+ */
+ inline float
+ optimizeInlierRatio(std::vector<double>& dists) const;
+};
+} // namespace registration
+} // namespace pcl
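
A hedged sketch (not part of the diff) of how the variably-trimmed rejector declared above might be driven; the clouds and correspondences are assumed to be supplied by the calling registration pipeline:

#include <pcl/point_types.h>
#include <pcl/registration/correspondence_rejection_var_trimmed.h>

// Sketch only: src, tgt and correspondences are assumed inputs from the caller.
void
rejectVarTrimmed(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& src,
                 const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& tgt,
                 const pcl::Correspondences& correspondences)
{
  pcl::registration::CorrespondenceRejectorVarTrimmed rejector;
  rejector.setInputSource<pcl::PointXYZ>(src); // distances recomputed from XYZ data
  rejector.setInputTarget<pcl::PointXYZ>(tgt);
  rejector.setMinRatio(0.1); // bounds for the internally optimized overlap ratio
  rejector.setMaxRatio(0.9);
  pcl::Correspondences remaining;
  rejector.getRemainingCorrespondences(correspondences, remaining);
}
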
#pragma once
#if defined __GNUC__
-# pragma GCC system_header
+#pragma GCC system_header
#endif
#include <pcl/registration/correspondence_types.h>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** @b sortCorrespondencesByQueryIndex : a functor for sorting correspondences by query
+ * index
+ * \author Dirk Holz
+ * \ingroup registration
+ */
+struct sortCorrespondencesByQueryIndex {
+ using first_argument_type = pcl::Correspondence;
+ using second_argument_type = pcl::Correspondence;
+ using result_type = bool;
+ bool
+ operator()(pcl::Correspondence a, pcl::Correspondence b)
{
- /** @b sortCorrespondencesByQueryIndex : a functor for sorting correspondences by query index
- * \author Dirk Holz
- * \ingroup registration
- */
- struct sortCorrespondencesByQueryIndex
- {
- using first_argument_type = pcl::Correspondence;
- using second_argument_type = pcl::Correspondence;
- using result_type = bool;
- bool
- operator()( pcl::Correspondence a, pcl::Correspondence b)
- {
- return (a.index_query < b.index_query);
- }
- };
-
- /** @b sortCorrespondencesByMatchIndex : a functor for sorting correspondences by match index
- * \author Dirk Holz
- * \ingroup registration
- */
- struct sortCorrespondencesByMatchIndex
- {
- using first_argument_type = pcl::Correspondence;
- using second_argument_type = pcl::Correspondence;
- using result_type = bool;
- bool
- operator()( pcl::Correspondence a, pcl::Correspondence b)
- {
- return (a.index_match < b.index_match);
- }
- };
+ return (a.index_query < b.index_query);
+ }
+};
- /** @b sortCorrespondencesByDistance : a functor for sorting correspondences by distance
- * \author Dirk Holz
- * \ingroup registration
- */
- struct sortCorrespondencesByDistance
- {
- using first_argument_type = pcl::Correspondence;
- using second_argument_type = pcl::Correspondence;
- using result_type = bool;
- bool
- operator()( pcl::Correspondence a, pcl::Correspondence b)
- {
- return (a.distance < b.distance);
- }
- };
+/** @b sortCorrespondencesByMatchIndex : a functor for sorting correspondences by match
+ * index
+ * \author Dirk Holz
+ * \ingroup registration
+ */
+struct sortCorrespondencesByMatchIndex {
+ using first_argument_type = pcl::Correspondence;
+ using second_argument_type = pcl::Correspondence;
+ using result_type = bool;
+ bool
+ operator()(pcl::Correspondence a, pcl::Correspondence b)
+ {
+ return (a.index_match < b.index_match);
+ }
+};
- /** @b sortCorrespondencesByQueryIndexAndDistance : a functor for sorting correspondences by query index _and_ distance
- * \author Dirk Holz
- * \ingroup registration
- */
- struct sortCorrespondencesByQueryIndexAndDistance
- {
- using first_argument_type = pcl::Correspondence;
- using second_argument_type = pcl::Correspondence;
- using result_type = bool;
- inline bool
- operator()( pcl::Correspondence a, pcl::Correspondence b)
- {
- if (a.index_query < b.index_query)
- return (true);
- else if ( (a.index_query == b.index_query) && (a.distance < b.distance) )
- return (true);
- return (false);
- }
- };
+/** @b sortCorrespondencesByDistance : a functor for sorting correspondences by distance
+ * \author Dirk Holz
+ * \ingroup registration
+ */
+struct sortCorrespondencesByDistance {
+ using first_argument_type = pcl::Correspondence;
+ using second_argument_type = pcl::Correspondence;
+ using result_type = bool;
+ bool
+ operator()(pcl::Correspondence a, pcl::Correspondence b)
+ {
+ return (a.distance < b.distance);
+ }
+};
- /** @b sortCorrespondencesByMatchIndexAndDistance : a functor for sorting correspondences by match index _and_ distance
- * \author Dirk Holz
- * \ingroup registration
- */
- struct sortCorrespondencesByMatchIndexAndDistance
- {
- using first_argument_type = pcl::Correspondence;
- using second_argument_type = pcl::Correspondence;
- using result_type = bool;
- inline bool
- operator()( pcl::Correspondence a, pcl::Correspondence b)
- {
- if (a.index_match < b.index_match)
- return (true);
- else if ( (a.index_match == b.index_match) && (a.distance < b.distance) )
- return (true);
- return (false);
- }
- };
+/** @b sortCorrespondencesByQueryIndexAndDistance : a functor for sorting
+ * correspondences by query index _and_ distance
+ * \author Dirk Holz
+ * \ingroup registration
+ */
+struct sortCorrespondencesByQueryIndexAndDistance {
+ using first_argument_type = pcl::Correspondence;
+ using second_argument_type = pcl::Correspondence;
+ using result_type = bool;
+ inline bool
+ operator()(pcl::Correspondence a, pcl::Correspondence b)
+ {
+ if (a.index_query < b.index_query)
+ return (true);
+ else if ((a.index_query == b.index_query) && (a.distance < b.distance))
+ return (true);
+ return (false);
+ }
+};
+/** @b sortCorrespondencesByMatchIndexAndDistance : a functor for sorting
+ * correspondences by match index _and_ distance
+ * \author Dirk Holz
+ * \ingroup registration
+ */
+struct sortCorrespondencesByMatchIndexAndDistance {
+ using first_argument_type = pcl::Correspondence;
+ using second_argument_type = pcl::Correspondence;
+ using result_type = bool;
+ inline bool
+ operator()(pcl::Correspondence a, pcl::Correspondence b)
+ {
+ if (a.index_match < b.index_match)
+ return (true);
+ else if ((a.index_match == b.index_match) && (a.distance < b.distance))
+ return (true);
+ return (false);
}
-}
+};
+
+} // namespace registration
+} // namespace pcl
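
The functors above are plain binary predicates, so they can be passed straight to std::sort; a small sketch (not part of the diff):

#include <pcl/correspondence.h>
#include <pcl/registration/correspondence_sorting.h>

#include <algorithm>

// Sketch only: order matches so that the smallest distances come first.
void
sortByDistance(pcl::Correspondences& correspondences)
{
  std::sort(correspondences.begin(),
            correspondences.end(),
            pcl::registration::sortCorrespondencesByDistance());
}
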
#include <pcl/correspondence.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief calculates the mean and standard deviation of descriptor distances from correspondences
- * \param[in] correspondences list of correspondences
- * \param[out] mean the mean descriptor distance of correspondences
- * \param[out] stddev the standard deviation of descriptor distances.
- * \note The sample variance is used to determine the standard deviation
- */
- inline void
- getCorDistMeanStd (const pcl::Correspondences& correspondences, double &mean, double &stddev);
+namespace pcl {
+namespace registration {
+/** \brief calculates the mean and standard deviation of descriptor distances from
+ * correspondences
+ * \param[in] correspondences list of correspondences
+ * \param[out] mean the mean descriptor distance of correspondences
+ * \param[out] stddev the standard deviation of descriptor distances.
+ * \note The sample variance is used to determine the standard deviation
+ */
+inline void
+getCorDistMeanStd(const pcl::Correspondences& correspondences,
+ double& mean,
+ double& stddev);
- /** \brief extracts the query indices
- * \param[in] correspondences list of correspondences
- * \param[out] indices array of extracted indices.
- * \note order of indices corresponds to input list of descriptor correspondences
- */
- inline void
- getQueryIndices (const pcl::Correspondences& correspondences, std::vector<int>& indices);
+/** \brief extracts the query indices
+ * \param[in] correspondences list of correspondences
+ * \param[out] indices array of extracted indices.
+ * \note order of indices corresponds to input list of descriptor correspondences
+ */
+inline void
+getQueryIndices(const pcl::Correspondences& correspondences, pcl::Indices& indices);
- /** \brief extracts the match indices
- * \param[in] correspondences list of correspondences
- * \param[out] indices array of extracted indices.
- * \note order of indices corresponds to input list of descriptor correspondences
- */
- inline void
- getMatchIndices (const pcl::Correspondences& correspondences, std::vector<int>& indices);
+/** \brief extracts the match indices
+ * \param[in] correspondences list of correspondences
+ * \param[out] indices array of extracted indices.
+ * \note order of indices corresponds to input list of descriptor correspondences
+ */
+inline void
+getMatchIndices(const pcl::Correspondences& correspondences, pcl::Indices& indices);
- }
-}
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/correspondence_types.hpp>
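
A short sketch (not part of the diff) of how the statistics helper above can feed a rejection threshold; the two-sigma cut-off is an illustrative choice, not something prescribed by the header:

#include <pcl/correspondence.h>
#include <pcl/registration/correspondence_types.h>

// Sketch only: derive a distance cut-off from the correspondence statistics.
double
distanceCutoff(const pcl::Correspondences& correspondences)
{
  double mean = 0.0, stddev = 0.0;
  pcl::registration::getCorDistMeanStd(correspondences, mean, stddev);
  return mean + 2.0 * stddev; // e.g. treat anything beyond two sigma as an outlier
}
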
#pragma once
+#include <pcl/registration/convergence_criteria.h>
+#include <pcl/correspondence.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/registration/eigen.h>
-#include <pcl/correspondence.h>
-#include <pcl/registration/convergence_criteria.h>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** \brief @b DefaultConvergenceCriteria represents an instantiation of
+ * ConvergenceCriteria, and implements the following criteria for registration loop
+ * evaluation:
+ *
+ * * a maximum number of iterations has been reached
+ * * the transformation (R, t) cannot be further updated (the difference between
+ * current and previous is smaller than a threshold)
+ * * the Mean Squared Error (MSE) between the current set of correspondences and the
+ * previous one is smaller than some threshold (both relative and absolute tests)
+ *
+ * \note Convergence is considered reached if ANY of the above criteria are met.
+ *
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename Scalar = float>
+class DefaultConvergenceCriteria : public ConvergenceCriteria {
+public:
+ using Ptr = shared_ptr<DefaultConvergenceCriteria<Scalar>>;
+ using ConstPtr = shared_ptr<const DefaultConvergenceCriteria<Scalar>>;
+
+ using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
+
+ enum ConvergenceState {
+ CONVERGENCE_CRITERIA_NOT_CONVERGED,
+ CONVERGENCE_CRITERIA_ITERATIONS,
+ CONVERGENCE_CRITERIA_TRANSFORM,
+ CONVERGENCE_CRITERIA_ABS_MSE,
+ CONVERGENCE_CRITERIA_REL_MSE,
+ CONVERGENCE_CRITERIA_NO_CORRESPONDENCES,
+ CONVERGENCE_CRITERIA_FAILURE_AFTER_MAX_ITERATIONS
+ };
+
+ /** \brief Empty constructor.
+ * Sets:
+ * * the maximum number of iterations to 100
+ * * the rotation threshold to 0.256 degrees (0.99999)
+ * * the translation threshold to 0.0003 meters (3e-4^2)
+ * * the MSE relative / absolute thresholds to 0.001% and 1e-12
+ *
+ * \param[in] iterations a reference to the number of iterations the loop has run so
+ * far
+ * \param[in] transform a reference to the current transformation obtained by the
+ * transformation evaluation
+ * \param[in] correspondences a reference to the current set of point correspondences
+ * between source and target
+ */
+ DefaultConvergenceCriteria(const int& iterations,
+ const Matrix4& transform,
+ const pcl::Correspondences& correspondences)
+ : iterations_(iterations)
+ , transformation_(transform)
+ , correspondences_(correspondences)
+ , correspondences_prev_mse_(std::numeric_limits<double>::max())
+ , correspondences_cur_mse_(std::numeric_limits<double>::max())
+ , max_iterations_(100) // 100 iterations
+ , failure_after_max_iter_(false)
+ , rotation_threshold_(0.99999) // 0.256 degrees
+ , translation_threshold_(3e-4 * 3e-4) // 0.0003 meters
+ , mse_threshold_relative_(0.00001) // 0.001% of the previous MSE (relative error)
+ , mse_threshold_absolute_(1e-12) // MSE (absolute error)
+ , iterations_similar_transforms_(0)
+ , max_iterations_similar_transforms_(0)
+ , convergence_state_(CONVERGENCE_CRITERIA_NOT_CONVERGED)
+ {}
+
+ /** \brief Empty destructor */
+ ~DefaultConvergenceCriteria() {}
+
+ /** \brief Set the maximum number of consecutive iterations that the internal
+ * rotation, translation, and MSE differences are allowed to be similar.
+ * \param[in] nr_iterations the maximum number of iterations
+ */
+ inline void
+ setMaximumIterationsSimilarTransforms(const int nr_iterations)
+ {
+ max_iterations_similar_transforms_ = nr_iterations;
+ }
+
+ /** \brief Get the maximum number of consecutive iterations that the internal
+ * rotation, translation, and MSE differences are allowed to be similar, as set by the
+ * user.
+ */
+ inline int
+ getMaximumIterationsSimilarTransforms() const
+ {
+ return (max_iterations_similar_transforms_);
+ }
+
+ /** \brief Set the maximum number of iterations the internal optimization should run
+ * for.
+ * \param[in] nr_iterations the maximum number of iterations the internal optimization
+ * should run for
+ */
+ inline void
+ setMaximumIterations(const int nr_iterations)
+ {
+ max_iterations_ = nr_iterations;
+ }
+
+ /** \brief Get the maximum number of iterations the internal optimization should run
+ * for, as set by the user. */
+ inline int
+ getMaximumIterations() const
+ {
+ return (max_iterations_);
+ }
+
+ /** \brief Specifies if the registration fails or converges when the maximum number of
+ * iterations is reached.
+ * \param[in] failure_after_max_iter If true, the registration fails. If false, the
+ * registration is assumed to have converged.
+ */
+ inline void
+ setFailureAfterMaximumIterations(const bool failure_after_max_iter)
+ {
+ failure_after_max_iter_ = failure_after_max_iter;
+ }
+
+ /** \brief Get whether the registration will fail or converge when the maximum number
+ * of iterations is reached. */
+ inline bool
+ getFailureAfterMaximumIterations() const
+ {
+ return (failure_after_max_iter_);
+ }
+
+ /** \brief Set the rotation threshold cosine angle (maximum allowable difference
+ * between two consecutive transformations) in order for an optimization to be
+ * considered as having converged to the final solution.
+ * \param[in] threshold the rotation threshold in order for an optimization to be
+ * considered as having converged to the final solution.
+ */
+ inline void
+ setRotationThreshold(const double threshold)
+ {
+ rotation_threshold_ = threshold;
+ }
+
+ /** \brief Get the rotation threshold cosine angle (maximum allowable difference
+ * between two consecutive transformations) as set by the user.
+ */
+ inline double
+ getRotationThreshold() const
+ {
+ return (rotation_threshold_);
+ }
+
+ /** \brief Set the translation threshold (maximum allowable difference between two
+ * consecutive transformations) in order for an optimization to be considered as
+ * having converged to the final solution.
+ * \param[in] threshold the translation threshold in order for an optimization to be
+ * considered as having converged to the final solution.
+ */
+ inline void
+ setTranslationThreshold(const double threshold)
+ {
+ translation_threshold_ = threshold;
+ }
+
+ /** \brief Get the translation threshold (maximum allowable difference between two
+ * consecutive transformations) as set by the user.
+ */
+ inline double
+ getTranslationThreshold() const
+ {
+ return (translation_threshold_);
+ }
+
+ /** \brief Set the relative MSE between two consecutive sets of correspondences.
+ * \param[in] mse_relative the relative MSE threshold
+ */
+ inline void
+ setRelativeMSE(const double mse_relative)
+ {
+ mse_threshold_relative_ = mse_relative;
+ }
+
+ /** \brief Get the relative MSE between two consecutive sets of correspondences. */
+ inline double
+ getRelativeMSE() const
+ {
+ return (mse_threshold_relative_);
+ }
+
+ /** \brief Set the absolute MSE between two consecutive sets of correspondences.
+ * \param[in] mse_absolute the absolute MSE threshold
+ */
+ inline void
+ setAbsoluteMSE(const double mse_absolute)
+ {
+ mse_threshold_absolute_ = mse_absolute;
+ }
+
+ /** \brief Get the absolute MSE between two consecutive sets of correspondences. */
+ inline double
+ getAbsoluteMSE() const
+ {
+ return (mse_threshold_absolute_);
+ }
+
+ /** \brief Check if convergence has been reached. */
+ bool
+ hasConverged() override;
+
+ /** \brief Return the convergence state after hasConverged () */
+ ConvergenceState
+ getConvergenceState()
+ {
+ return (convergence_state_);
+ }
+
+ /** \brief Sets the convergence state externally (for example, when ICP does not find
+ * enough correspondences to estimate a transformation, the function is called setting
+ * the convergence state to ConvergenceState::CONVERGENCE_CRITERIA_NO_CORRESPONDENCES)
+ * \param[in] c the convergence state
+ */
+ inline void
+ setConvergenceState(ConvergenceState c)
{
- /** \brief @b DefaultConvergenceCriteria represents an instantiation of
- * ConvergenceCriteria, and implements the following criteria for registration loop
- * evaluation:
- *
- * * a maximum number of iterations has been reached
- * * the transformation (R, t) cannot be further updated (the difference between current and previous is smaller than a threshold)
- * * the Mean Squared Error (MSE) between the current set of correspondences and the previous one is smaller than some threshold (both relative and absolute tests)
- *
- * \note Convergence is considered reached if ANY of the above criteria are met.
- *
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename Scalar = float>
- class DefaultConvergenceCriteria : public ConvergenceCriteria
- {
- public:
- using Ptr = shared_ptr<DefaultConvergenceCriteria<Scalar> >;
- using ConstPtr = shared_ptr<const DefaultConvergenceCriteria<Scalar> >;
-
- using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
-
- enum ConvergenceState
- {
- CONVERGENCE_CRITERIA_NOT_CONVERGED,
- CONVERGENCE_CRITERIA_ITERATIONS,
- CONVERGENCE_CRITERIA_TRANSFORM,
- CONVERGENCE_CRITERIA_ABS_MSE,
- CONVERGENCE_CRITERIA_REL_MSE,
- CONVERGENCE_CRITERIA_NO_CORRESPONDENCES,
- CONVERGENCE_CRITERIA_FAILURE_AFTER_MAX_ITERATIONS
- };
-
- /** \brief Empty constructor.
- * Sets:
- * * the maximum number of iterations to 1000
- * * the rotation threshold to 0.256 degrees (0.99999)
- * * the translation threshold to 0.0003 meters (3e-4^2)
- * * the MSE relative / absolute thresholds to 0.001% and 1e-12
- *
- * \param[in] iterations a reference to the number of iterations the loop has ran so far
- * \param[in] transform a reference to the current transformation obtained by the transformation evaluation
- * \param[in] correspondences a reference to the current set of point correspondences between source and target
- */
- DefaultConvergenceCriteria (const int &iterations, const Matrix4 &transform, const pcl::Correspondences &correspondences)
- : iterations_ (iterations)
- , transformation_ (transform)
- , correspondences_ (correspondences)
- , correspondences_prev_mse_ (std::numeric_limits<double>::max ())
- , correspondences_cur_mse_ (std::numeric_limits<double>::max ())
- , max_iterations_ (100) // 100 iterations
- , failure_after_max_iter_ (false)
- , rotation_threshold_ (0.99999) // 0.256 degrees
- , translation_threshold_ (3e-4 * 3e-4) // 0.0003 meters
- , mse_threshold_relative_ (0.00001) // 0.001% of the previous MSE (relative error)
- , mse_threshold_absolute_ (1e-12) // MSE (absolute error)
- , iterations_similar_transforms_ (0)
- , max_iterations_similar_transforms_ (0)
- , convergence_state_ (CONVERGENCE_CRITERIA_NOT_CONVERGED)
- {
- }
-
- /** \brief Empty destructor */
- ~DefaultConvergenceCriteria () {}
-
- /** \brief Set the maximum number of consecutive iterations that the internal rotation,
- * translation, and MSE differences are allowed to be similar.
- * \param[in] nr_iterations the maximum number of iterations
- */
- inline void
- setMaximumIterationsSimilarTransforms (const int nr_iterations) { max_iterations_similar_transforms_ = nr_iterations; }
-
- /** \brief Get the maximum number of consecutive iterations that the internal rotation,
- * translation, and MSE differences are allowed to be similar, as set by the user.
- */
- inline int
- getMaximumIterationsSimilarTransforms () const { return (max_iterations_similar_transforms_); }
-
- /** \brief Set the maximum number of iterations the internal optimization should run for.
- * \param[in] nr_iterations the maximum number of iterations the internal optimization should run for
- */
- inline void
- setMaximumIterations (const int nr_iterations) { max_iterations_ = nr_iterations; }
-
- /** \brief Get the maximum number of iterations the internal optimization should run for, as set by the user. */
- inline int
- getMaximumIterations () const { return (max_iterations_); }
-
- /** \brief Specifies if the registration fails or converges when the maximum number of iterations is reached.
- * \param[in] failure_after_max_iter If true, the registration fails. If false, the registration is assumed to have converged.
- */
- inline void
- setFailureAfterMaximumIterations (const bool failure_after_max_iter) { failure_after_max_iter_ = failure_after_max_iter; }
-
- /** \brief Get whether the registration will fail or converge when the maximum number of iterations is reached. */
- inline bool
- getFailureAfterMaximumIterations () const { return (failure_after_max_iter_); }
-
- /** \brief Set the rotation threshold cosine angle (maximum allowable difference between two consecutive transformations) in order for an optimization to be considered as having converged to the final solution.
- * \param[in] threshold the rotation threshold in order for an optimization to be considered as having converged to the final solution.
- */
- inline void
- setRotationThreshold (const double threshold) { rotation_threshold_ = threshold; }
-
- /** \brief Get the rotation threshold cosine angle (maximum allowable difference between two consecutive transformations) as set by the user.
- */
- inline double
- getRotationThreshold () const { return (rotation_threshold_); }
-
- /** \brief Set the translation threshold (maximum allowable difference between two consecutive transformations) in order for an optimization to be considered as having converged to the final solution.
- * \param[in] threshold the translation threshold in order for an optimization to be considered as having converged to the final solution.
- */
- inline void
- setTranslationThreshold (const double threshold) { translation_threshold_ = threshold; }
-
- /** \brief Get the rotation threshold cosine angle (maximum allowable difference between two consecutive transformations) as set by the user.
- */
- inline double
- getTranslationThreshold () const { return (translation_threshold_); }
-
- /** \brief Set the relative MSE between two consecutive sets of correspondences.
- * \param[in] mse_relative the relative MSE threshold
- */
- inline void
- setRelativeMSE (const double mse_relative) { mse_threshold_relative_ = mse_relative; }
-
- /** \brief Get the relative MSE between two consecutive sets of correspondences. */
- inline double
- getRelativeMSE () const { return (mse_threshold_relative_); }
-
- /** \brief Set the absolute MSE between two consecutive sets of correspondences.
- * \param[in] mse_absolute the relative MSE threshold
- */
- inline void
- setAbsoluteMSE (const double mse_absolute) { mse_threshold_absolute_ = mse_absolute; }
-
- /** \brief Get the absolute MSE between two consecutive sets of correspondences. */
- inline double
- getAbsoluteMSE () const { return (mse_threshold_absolute_); }
-
-
- /** \brief Check if convergence has been reached. */
- bool
- hasConverged () override;
-
- /** \brief Return the convergence state after hasConverged () */
- ConvergenceState
- getConvergenceState ()
- {
- return (convergence_state_);
- }
-
- /** \brief Sets the convergence state externally (for example, when ICP does not find
- * enough correspondences to estimate a transformation, the function is called setting
- * the convergence state to ConvergenceState::CONVERGENCE_CRITERIA_NO_CORRESPONDENCES)
- * \param[in] c the convergence state
- */
- inline void
- setConvergenceState(ConvergenceState c)
- {
- convergence_state_ = c;
- }
-
- protected:
-
- /** \brief Calculate the mean squared error (MSE) of the distance for a given set of correspondences.
- * \param[in] correspondences the given set of correspondences
- */
- inline double
- calculateMSE (const pcl::Correspondences &correspondences) const
- {
- double mse = 0;
- for (const auto &correspondence : correspondences)
- mse += correspondence.distance;
- mse /= double (correspondences.size ());
- return (mse);
- }
-
- /** \brief The number of iterations done by the registration loop so far. */
- const int &iterations_;
-
- /** \brief The current transformation obtained by the transformation estimation method. */
- const Matrix4 &transformation_;
-
- /** \brief The current set of point correspondences between the source and the target. */
- const pcl::Correspondences &correspondences_;
-
- /** \brief The MSE for the previous set of correspondences. */
- double correspondences_prev_mse_;
-
- /** \brief The MSE for the current set of correspondences. */
- double correspondences_cur_mse_;
-
- /** \brief The maximum nuyyGmber of iterations that the registration loop is to be executed. */
- int max_iterations_;
-
- /** \brief Specifys if the registration fails or converges when the maximum number of iterations is reached. */
- bool failure_after_max_iter_;
-
- /** \brief The rotation threshold is the relative rotation between two iterations (as angle cosine). */
- double rotation_threshold_;
-
- /** \brief The translation threshold is the relative translation between two iterations (0 if no translation). */
- double translation_threshold_;
-
- /** \brief The relative change from the previous MSE for the current set of correspondences, e.g. .1 means 10% change. */
- double mse_threshold_relative_;
-
- /** \brief The absolute change from the previous MSE for the current set of correspondences. */
- double mse_threshold_absolute_;
-
- /** \brief Internal counter for the number of iterations that the internal
- * rotation, translation, and MSE differences are allowed to be similar. */
- int iterations_similar_transforms_;
-
- /** \brief The maximum number of iterations that the internal rotation,
- * translation, and MSE differences are allowed to be similar. */
- int max_iterations_similar_transforms_;
-
- /** \brief The state of the convergence (e.g., why did the registration converge). */
- ConvergenceState convergence_state_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ convergence_state_ = c;
}
-}
+
+protected:
+ /** \brief Calculate the mean squared error (MSE) of the distance for a given set of
+ * correspondences.
+ * \param[in] correspondences the given set of correspondences
+ */
+ inline double
+ calculateMSE(const pcl::Correspondences& correspondences) const
+ {
+ double mse = 0;
+ for (const auto& correspondence : correspondences)
+ mse += correspondence.distance;
+ mse /= double(correspondences.size());
+ return (mse);
+ }
+
+ /** \brief The number of iterations done by the registration loop so far. */
+ const int& iterations_;
+
+ /** \brief The current transformation obtained by the transformation estimation
+ * method. */
+ const Matrix4& transformation_;
+
+ /** \brief The current set of point correspondences between the source and the target.
+ */
+ const pcl::Correspondences& correspondences_;
+
+ /** \brief The MSE for the previous set of correspondences. */
+ double correspondences_prev_mse_;
+
+ /** \brief The MSE for the current set of correspondences. */
+ double correspondences_cur_mse_;
+
+ /** \brief The maximum number of iterations that the registration loop is to be
+ * executed. */
+ int max_iterations_;
+
+ /** \brief Specifies if the registration fails or converges when the maximum number of
+ * iterations is reached. */
+ bool failure_after_max_iter_;
+
+ /** \brief The rotation threshold is the relative rotation between two iterations (as
+ * angle cosine). */
+ double rotation_threshold_;
+
+ /** \brief The translation threshold is the relative translation between two
+ * iterations (0 if no translation). */
+ double translation_threshold_;
+
+ /** \brief The relative change from the previous MSE for the current set of
+ * correspondences, e.g. .1 means 10% change. */
+ double mse_threshold_relative_;
+
+ /** \brief The absolute change from the previous MSE for the current set of
+ * correspondences. */
+ double mse_threshold_absolute_;
+
+ /** \brief Internal counter for the number of iterations that the internal
+ * rotation, translation, and MSE differences are allowed to be similar. */
+ int iterations_similar_transforms_;
+
+ /** \brief The maximum number of iterations that the internal rotation,
+ * translation, and MSE differences are allowed to be similar. */
+ int max_iterations_similar_transforms_;
+
+ /** \brief The state of the convergence (e.g., why did the registration converge). */
+ ConvergenceState convergence_state_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/default_convergence_criteria.hpp>
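
A skeletal sketch (not part of the diff) of the intended wiring: the criteria object holds references to the loop variables, so updating them in place is enough for hasConverged() to see the latest state. The estimation steps are hypothetical placeholders left as comments; only the convergence plumbing is shown.

#include <pcl/correspondence.h>
#include <pcl/registration/default_convergence_criteria.h>

#include <Eigen/Core>

// Sketch only: the estimation steps below are hypothetical placeholders.
void
convergenceLoop()
{
  int iterations = 0;
  Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();
  pcl::Correspondences correspondences;

  pcl::registration::DefaultConvergenceCriteria<float> criteria(
      iterations, transform, correspondences);
  criteria.setMaximumIterations(50);
  criteria.setRelativeMSE(1e-5);

  do {
    // correspondences = estimateCorrespondences(...); // hypothetical step
    // transform = estimateTransform(...);             // hypothetical step
    ++iterations;
  } while (!criteria.hasConverged());
}
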
#pragma once
-#include <pcl/registration/eigen.h>
+#include <Eigen/Core>
+
+#include <string.h>
+
+#include <algorithm>
#include <vector>
-namespace pcl
+namespace pcl {
+namespace distances {
+
+/** \brief Compute the median value from a set of doubles
+ * \param[in] fvec the set of doubles
+ * \param[in] m the number of doubles in the set
+ */
+inline double
+computeMedian(double* fvec, int m)
{
- namespace distances
- {
+ // Copy the values to vectors for faster sorting
+ std::vector<double> data(m);
+ memcpy(&data[0], fvec, sizeof(double) * m);
- /** \brief Compute the median value from a set of doubles
- * \param[in] fvec the set of doubles
- * \param[in] m the number of doubles in the set
- */
- inline double
- computeMedian (double *fvec, int m)
- {
- // Copy the values to vectors for faster sorting
- std::vector<double> data (m);
- memcpy (&data[0], fvec, sizeof (double) * m);
-
- std::nth_element(data.begin(), data.begin() + (data.size () >> 1), data.end());
- return (data[data.size () >> 1]);
- }
+ std::nth_element(data.begin(), data.begin() + (data.size() >> 1), data.end());
+ return (data[data.size() >> 1]);
+}
- /** \brief Use a Huber kernel to estimate the distance between two vectors
- * \param[in] p_src the first eigen vector
- * \param[in] p_tgt the second eigen vector
- * \param[in] sigma the sigma value
- */
- inline double
- huber (const Eigen::Vector4f &p_src, const Eigen::Vector4f &p_tgt, double sigma)
- {
- Eigen::Array4f diff = (p_tgt.array () - p_src.array ()).abs ();
- double norm = 0.0;
- for (int i = 0; i < 3; ++i)
- {
- if (diff[i] < sigma)
- norm += diff[i] * diff[i];
- else
- norm += 2.0 * sigma * diff[i] - sigma * sigma;
- }
- return (norm);
- }
+/** \brief Use a Huber kernel to estimate the distance between two vectors
+ * \param[in] p_src the first eigen vector
+ * \param[in] p_tgt the second eigen vector
+ * \param[in] sigma the sigma value
+ */
+inline double
+huber(const Eigen::Vector4f& p_src, const Eigen::Vector4f& p_tgt, double sigma)
+{
+ Eigen::Array4f diff = (p_tgt.array() - p_src.array()).abs();
+ double norm = 0.0;
+ for (int i = 0; i < 3; ++i) {
+ if (diff[i] < sigma)
+ norm += diff[i] * diff[i];
+ else
+ norm += 2.0 * sigma * diff[i] - sigma * sigma;
+ }
+ return (norm);
+}
- /** \brief Use a Huber kernel to estimate the distance between two vectors
- * \param[in] diff the norm difference between two vectors
- * \param[in] sigma the sigma value
- */
- inline double
- huber (double diff, double sigma)
- {
- double norm = 0.0;
- if (diff < sigma)
- norm += diff * diff;
- else
- norm += 2.0 * sigma * diff - sigma * sigma;
- return (norm);
- }
+/** \brief Use a Huber kernel to estimate the distance between two vectors
+ * \param[in] diff the norm difference between two vectors
+ * \param[in] sigma the sigma value
+ */
+inline double
+huber(double diff, double sigma)
+{
+ double norm = 0.0;
+ if (diff < sigma)
+ norm += diff * diff;
+ else
+ norm += 2.0 * sigma * diff - sigma * sigma;
+ return (norm);
+}
- /** \brief Use a Gedikli kernel to estimate the distance between two vectors
- * (for more information, see
- * \param[in] val the norm difference between two vectors
- * \param[in] clipping the clipping value
- * \param[in] slope the slope. Default: 4
- */
- inline double
- gedikli (double val, double clipping, double slope = 4)
- {
- return (1.0 / (1.0 + pow (std::abs(val) / clipping, slope)));
- }
+/** \brief Use a Gedikli kernel to estimate the distance between two vectors
+ * (for more information, see
+ * \param[in] val the norm difference between two vectors
+ * \param[in] clipping the clipping value
+ * \param[in] slope the slope. Default: 4
+ */
+inline double
+gedikli(double val, double clipping, double slope = 4)
+{
+ return (1.0 / (1.0 + pow(std::abs(val) / clipping, slope)));
+}
- /** \brief Compute the Manhattan distance between two eigen vectors.
- * \param[in] p_src the first eigen vector
- * \param[in] p_tgt the second eigen vector
- */
- inline double
- l1 (const Eigen::Vector4f &p_src, const Eigen::Vector4f &p_tgt)
- {
- return ((p_src.array () - p_tgt.array ()).abs ().sum ());
- }
+/** \brief Compute the Manhattan distance between two eigen vectors.
+ * \param[in] p_src the first eigen vector
+ * \param[in] p_tgt the second eigen vector
+ */
+inline double
+l1(const Eigen::Vector4f& p_src, const Eigen::Vector4f& p_tgt)
+{
+ return ((p_src.array() - p_tgt.array()).abs().sum());
+}
- /** \brief Compute the Euclidean distance between two eigen vectors.
- * \param[in] p_src the first eigen vector
- * \param[in] p_tgt the second eigen vector
- */
- inline double
- l2 (const Eigen::Vector4f &p_src, const Eigen::Vector4f &p_tgt)
- {
- return ((p_src - p_tgt).norm ());
- }
+/** \brief Compute the Euclidean distance between two eigen vectors.
+ * \param[in] p_src the first eigen vector
+ * \param[in] p_tgt the second eigen vector
+ */
+inline double
+l2(const Eigen::Vector4f& p_src, const Eigen::Vector4f& p_tgt)
+{
+ return ((p_src - p_tgt).norm());
+}
- /** \brief Compute the squared Euclidean distance between two eigen vectors.
- * \param[in] p_src the first eigen vector
- * \param[in] p_tgt the second eigen vector
- */
- inline double
- l2Sqr (const Eigen::Vector4f &p_src, const Eigen::Vector4f &p_tgt)
- {
- return ((p_src - p_tgt).squaredNorm ());
- }
- }
+/** \brief Compute the squared Euclidean distance between two eigen vectors.
+ * \param[in] p_src the first eigen vector
+ * \param[in] p_tgt the second eigen vector
+ */
+inline double
+l2Sqr(const Eigen::Vector4f& p_src, const Eigen::Vector4f& p_tgt)
+{
+ return ((p_src - p_tgt).squaredNorm());
}
+} // namespace distances
+} // namespace pcl
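
A small sketch (not part of the diff) contrasting the robust Huber kernel above with the plain Euclidean distance; the two points and the sigma value are arbitrary values chosen for illustration:

#include <pcl/registration/distances.h>

#include <Eigen/Core>
#include <iostream>

// Sketch only: sigma = 0.05 is an arbitrary illustrative value.
void
compareDistanceKernels()
{
  const Eigen::Vector4f src(0.0f, 0.0f, 0.0f, 1.0f);
  const Eigen::Vector4f tgt(0.1f, 0.2f, 0.0f, 1.0f);
  std::cout << "l2:    " << pcl::distances::l2(src, tgt) << '\n'
            << "huber: " << pcl::distances::huber(src, tgt, 0.05) << '\n';
}
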
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b NullMeasurement struct
- * \author Nicola Fioraio
- * \ingroup registration
- */
- struct NullMeasurement
- {};
+namespace pcl {
+namespace registration {
+/** \brief @b NullMeasurement struct
+ * \author Nicola Fioraio
+ * \ingroup registration
+ */
+struct NullMeasurement {};
- /** \brief @b PoseMeasurement struct
- * \author Nicola Fioraio
- * \ingroup registration
- */
- template <typename VertexT, typename InformationT>
- struct PoseMeasurement
- {
- VertexT v_start, v_end;
- Eigen::Matrix4f relative_transformation;
- InformationT information_matrix;
- PCL_MAKE_ALIGNED_OPERATOR_NEW
+/** \brief @b PoseMeasurement struct
+ * \author Nicola Fioraio
+ * \ingroup registration
+ */
+template <typename VertexT, typename InformationT>
+struct PoseMeasurement {
+ VertexT v_start, v_end;
+ Eigen::Matrix4f relative_transformation;
+ InformationT information_matrix;
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
- PoseMeasurement (const VertexT& v_s, const VertexT& v_e, const Eigen::Matrix4f& tr, const InformationT& mtx)
- : v_start (v_s), v_end (v_e), relative_transformation (tr), information_matrix (mtx) {}
- };
- }
-}
+ PoseMeasurement(const VertexT& v_s,
+ const VertexT& v_e,
+ const Eigen::Matrix4f& tr,
+ const InformationT& mtx)
+ : v_start(v_s), v_end(v_e), relative_transformation(tr), information_matrix(mtx)
+ {}
+};
+} // namespace registration
+} // namespace pcl
#pragma once
#if defined __GNUC__
-# pragma GCC system_header
+#pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
#include <Eigen/Core>
+#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <unsupported/Eigen/Polynomials>
-#include <Eigen/Dense>
#pragma once
+#include <pcl/registration/boost_graph.h>
+#include <pcl/registration/icp.h>
+#include <pcl/registration/registration.h>
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include <pcl/pcl_base.h>
-#include <pcl/point_types.h>
+#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <pcl/registration/registration.h>
-#include <pcl/registration/boost.h>
-#include <pcl/registration/eigen.h>
-#include <pcl/registration/icp.h>
-#include <pcl/registration/boost_graph.h>
+#include <pcl/point_types.h>
+
+namespace pcl {
+namespace registration {
+/** \brief @b ELCH (Explicit Loop Closing Heuristic) class
+ * \author Jochen Sprickerhof
+ * \ingroup registration
+ */
+template <typename PointT>
+class ELCH : public PCLBase<PointT> {
+public:
+ using Ptr = shared_ptr<ELCH<PointT>>;
+ using ConstPtr = shared_ptr<const ELCH<PointT>>;
+
+ using PointCloud = pcl::PointCloud<PointT>;
+ using PointCloudPtr = typename PointCloud::Ptr;
+ using PointCloudConstPtr = typename PointCloud::ConstPtr;
+
+ struct Vertex {
+ Vertex() : cloud() {}
+ PointCloudPtr cloud;
+ Eigen::Affine3f transform;
+ };
+
+ /** \brief graph structure to hold the SLAM graph */
+ using LoopGraph = boost::adjacency_list<boost::listS,
+ boost::eigen_vecS,
+ boost::undirectedS,
+ Vertex,
+ boost::no_property>;
+
+ using LoopGraphPtr = shared_ptr<LoopGraph>;
+
+ using Registration = pcl::Registration<PointT, PointT>;
+ using RegistrationPtr = typename Registration::Ptr;
+ using RegistrationConstPtr = typename Registration::ConstPtr;
+
+ /** \brief Empty constructor. */
+ ELCH()
+ : loop_graph_(new LoopGraph)
+ , loop_start_(0)
+ , loop_end_(0)
+ , reg_(new pcl::IterativeClosestPoint<PointT, PointT>)
+ , compute_loop_(true)
+ , vd_(){};
+
+ /** \brief Empty destructor */
+ ~ELCH() {}
+
+ /** \brief Add a new point cloud to the internal graph.
+ * \param[in] cloud the new point cloud
+ */
+ inline void
+ addPointCloud(PointCloudPtr cloud)
+ {
+ typename boost::graph_traits<LoopGraph>::vertex_descriptor vd =
+ add_vertex(*loop_graph_);
+ (*loop_graph_)[vd].cloud = cloud;
+ if (num_vertices(*loop_graph_) > 1)
+ add_edge(vd_, vd, *loop_graph_);
+ vd_ = vd;
+ }
+
+ /** \brief Getter for the internal graph. */
+ inline LoopGraphPtr
+ getLoopGraph()
+ {
+ return (loop_graph_);
+ }
+
+ /** \brief Setter for a new internal graph.
+ * \param[in] loop_graph the new graph
+ */
+ inline void
+ setLoopGraph(LoopGraphPtr loop_graph)
+ {
+ loop_graph_ = loop_graph;
+ }
+
+ /** \brief Getter for the first scan of a loop. */
+ inline typename boost::graph_traits<LoopGraph>::vertex_descriptor
+ getLoopStart()
+ {
+ return (loop_start_);
+ }
+
+ /** \brief Setter for the first scan of a loop.
+ * \param[in] loop_start the scan that starts the loop
+ */
+ inline void
+ setLoopStart(
+ const typename boost::graph_traits<LoopGraph>::vertex_descriptor& loop_start)
+ {
+ loop_start_ = loop_start;
+ }
+
+ /** \brief Getter for the last scan of a loop. */
+ inline typename boost::graph_traits<LoopGraph>::vertex_descriptor
+ getLoopEnd()
+ {
+ return (loop_end_);
+ }
+
+ /** \brief Setter for the last scan of a loop.
+ * \param[in] loop_end the scan that ends the loop
+ */
+ inline void
+ setLoopEnd(const typename boost::graph_traits<LoopGraph>::vertex_descriptor& loop_end)
+ {
+ loop_end_ = loop_end;
+ }
-namespace pcl
-{
- namespace registration
+ /** \brief Getter for the registration algorithm. */
+ inline RegistrationPtr
+ getReg()
{
- /** \brief @b ELCH (Explicit Loop Closing Heuristic) class
- * \author Jochen Sprickerhof
- * \ingroup registration
- */
- template <typename PointT>
- class ELCH : public PCLBase<PointT>
- {
- public:
- using Ptr = shared_ptr<ELCH<PointT> >;
- using ConstPtr = shared_ptr<const ELCH<PointT> >;
-
- using PointCloud = pcl::PointCloud<PointT>;
- using PointCloudPtr = typename PointCloud::Ptr;
- using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
- struct Vertex
- {
- Vertex () : cloud () {}
- PointCloudPtr cloud;
- Eigen::Affine3f transform;
- };
-
- /** \brief graph structure to hold the SLAM graph */
- using LoopGraph = boost::adjacency_list<
- boost::listS, boost::eigen_vecS, boost::undirectedS,
- Vertex,
- boost::no_property>;
-
- using LoopGraphPtr = shared_ptr<LoopGraph>;
-
- using Registration = pcl::Registration<PointT, PointT>;
- using RegistrationPtr = typename Registration::Ptr;
- using RegistrationConstPtr = typename Registration::ConstPtr;
-
- /** \brief Empty constructor. */
- ELCH () :
- loop_graph_ (new LoopGraph),
- loop_start_ (0),
- loop_end_ (0),
- reg_ (new pcl::IterativeClosestPoint<PointT, PointT>),
- compute_loop_ (true),
- vd_ ()
- {};
-
- /** \brief Empty destructor */
- ~ELCH () {}
-
- /** \brief Add a new point cloud to the internal graph.
- * \param[in] cloud the new point cloud
- */
- inline void
- addPointCloud (PointCloudPtr cloud)
- {
- typename boost::graph_traits<LoopGraph>::vertex_descriptor vd = add_vertex (*loop_graph_);
- (*loop_graph_)[vd].cloud = cloud;
- if (num_vertices (*loop_graph_) > 1)
- add_edge (vd_, vd, *loop_graph_);
- vd_ = vd;
- }
-
- /** \brief Getter for the internal graph. */
- inline LoopGraphPtr
- getLoopGraph ()
- {
- return (loop_graph_);
- }
-
- /** \brief Setter for a new internal graph.
- * \param[in] loop_graph the new graph
- */
- inline void
- setLoopGraph (LoopGraphPtr loop_graph)
- {
- loop_graph_ = loop_graph;
- }
-
- /** \brief Getter for the first scan of a loop. */
- inline typename boost::graph_traits<LoopGraph>::vertex_descriptor
- getLoopStart ()
- {
- return (loop_start_);
- }
-
- /** \brief Setter for the first scan of a loop.
- * \param[in] loop_start the scan that starts the loop
- */
- inline void
- setLoopStart (const typename boost::graph_traits<LoopGraph>::vertex_descriptor &loop_start)
- {
- loop_start_ = loop_start;
- }
-
- /** \brief Getter for the last scan of a loop. */
- inline typename boost::graph_traits<LoopGraph>::vertex_descriptor
- getLoopEnd ()
- {
- return (loop_end_);
- }
-
- /** \brief Setter for the last scan of a loop.
- * \param[in] loop_end the scan that ends the loop
- */
- inline void
- setLoopEnd (const typename boost::graph_traits<LoopGraph>::vertex_descriptor &loop_end)
- {
- loop_end_ = loop_end;
- }
-
- /** \brief Getter for the registration algorithm. */
- inline RegistrationPtr
- getReg ()
- {
- return (reg_);
- }
-
- /** \brief Setter for the registration algorithm.
- * \param[in] reg the registration algorithm used to compute the transformation between the start and the end of the loop
- */
- inline void
- setReg (RegistrationPtr reg)
- {
- reg_ = reg;
- }
-
- /** \brief Getter for the transformation between the first and the last scan. */
- inline Eigen::Matrix4f
- getLoopTransform ()
- {
- return (loop_transform_);
- }
-
- /** \brief Setter for the transformation between the first and the last scan.
- * \param[in] loop_transform the transformation between the first and the last scan
- */
- inline void
- setLoopTransform (const Eigen::Matrix4f &loop_transform)
- {
- loop_transform_ = loop_transform;
- compute_loop_ = false;
- }
-
- /** \brief Computes new poses for all point clouds by closing the loop
- * between start and end point cloud. This will transform all given point
- * clouds for now!
- */
- void
- compute ();
-
- protected:
- using PCLBase<PointT>::deinitCompute;
-
- /** \brief This method should get called before starting the actual computation. */
- virtual bool
- initCompute ();
-
- private:
- /** \brief graph structure for the internal optimization graph */
- using LOAGraph = boost::adjacency_list<
- boost::listS, boost::vecS, boost::undirectedS,
- boost::no_property,
- boost::property< boost::edge_weight_t, double > >;
-
- /**
- * graph balancer algorithm computes the weights
- * @param[in] g the graph
- * @param[in] f index of the first node
- * @param[in] l index of the last node
- * @param[out] weights array for the weights
- */
- void
- loopOptimizerAlgorithm (LOAGraph &g, double *weights);
-
- /** \brief The internal loop graph. */
- LoopGraphPtr loop_graph_;
-
- /** \brief The first scan of the loop. */
- typename boost::graph_traits<LoopGraph>::vertex_descriptor loop_start_;
-
- /** \brief The last scan of the loop. */
- typename boost::graph_traits<LoopGraph>::vertex_descriptor loop_end_;
-
- /** \brief The registration object used to close the loop. */
- RegistrationPtr reg_;
-
- /** \brief The transformation between that start and end of the loop. */
- Eigen::Matrix4f loop_transform_;
- bool compute_loop_;
-
- /** \brief previously added node in the loop_graph_. */
- typename boost::graph_traits<LoopGraph>::vertex_descriptor vd_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ return (reg_);
}
-}
+
+ /** \brief Setter for the registration algorithm.
+ * \param[in] reg the registration algorithm used to compute the transformation
+ * between the start and the end of the loop
+ */
+ inline void
+ setReg(RegistrationPtr reg)
+ {
+ reg_ = reg;
+ }
+
+ /** \brief Getter for the transformation between the first and the last scan. */
+ inline Eigen::Matrix4f
+ getLoopTransform()
+ {
+ return (loop_transform_);
+ }
+
+ /** \brief Setter for the transformation between the first and the last scan.
+ * \param[in] loop_transform the transformation between the first and the last scan
+ */
+ inline void
+ setLoopTransform(const Eigen::Matrix4f& loop_transform)
+ {
+ loop_transform_ = loop_transform;
+ compute_loop_ = false;
+ }
+
+ /** \brief Computes new poses for all point clouds by closing the loop
+ * between start and end point cloud. This will transform all given point
+ * clouds for now!
+ */
+ void
+ compute();
+
+protected:
+ using PCLBase<PointT>::deinitCompute;
+
+ /** \brief This method should get called before starting the actual computation. */
+ virtual bool
+ initCompute();
+
+private:
+ /** \brief graph structure for the internal optimization graph */
+ using LOAGraph = boost::adjacency_list<boost::listS,
+ boost::vecS,
+ boost::undirectedS,
+ boost::no_property,
+ boost::property<boost::edge_weight_t, double>>;
+
+ /**
+ * \brief Graph balancer algorithm that computes the edge weights.
+ * @param[in] g the graph
+ * @param[out] weights array for the weights
+ */
+ void
+ loopOptimizerAlgorithm(LOAGraph& g, double* weights);
+
+ /** \brief The internal loop graph. */
+ LoopGraphPtr loop_graph_;
+
+ /** \brief The first scan of the loop. */
+ typename boost::graph_traits<LoopGraph>::vertex_descriptor loop_start_;
+
+ /** \brief The last scan of the loop. */
+ typename boost::graph_traits<LoopGraph>::vertex_descriptor loop_end_;
+
+ /** \brief The registration object used to close the loop. */
+ RegistrationPtr reg_;
+
+ /** \brief The transformation between the start and end of the loop. */
+ Eigen::Matrix4f loop_transform_;
+ bool compute_loop_;
+
+ /** \brief previously added node in the loop_graph_. */
+ typename boost::graph_traits<LoopGraph>::vertex_descriptor vd_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/elch.hpp>
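
For reference, a minimal usage sketch of the ELCH interface declared above (not part of this patch; the helper closeLoop and its precondition that the scans are already pairwise pre-registered are illustrative assumptions):

  // Sketch: close a loop with ELCH; `clouds` holds the scans of one loop in
  // acquisition order, already roughly pre-aligned.
  #include <pcl/point_cloud.h>
  #include <pcl/point_types.h>
  #include <pcl/registration/elch.h>

  #include <vector>

  void
  closeLoop (std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr>& clouds)
  {
    pcl::registration::ELCH<pcl::PointXYZ> elch;
    for (const auto& cloud : clouds)
      elch.addPointCloud (cloud);          // builds the internal loop graph

    // Declare which scans close the loop; vertex descriptors follow insertion order.
    elch.setLoopStart (0);
    elch.setLoopEnd (clouds.size () - 1);

    // Registers the first against the last scan (ICP by default, see setReg ())
    // and distributes the residual error over all poses, transforming the clouds
    // in place.
    elch.compute ();
  }
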
#include <pcl/exceptions.h>
-namespace pcl
-{
- /** \class SolverDidntConvergeException
- * \brief An exception that is thrown when the non linear solver didn't converge
- */
- class PCL_EXPORTS SolverDidntConvergeException : public PCLException
- {
- public:
-
- SolverDidntConvergeException (const std::string& error_description,
- const char* file_name = nullptr,
- const char* function_name = nullptr,
- unsigned line_number = 0)
- : pcl::PCLException (error_description, file_name, function_name, line_number) { }
- } ;
+namespace pcl {
+/** \class SolverDidntConvergeException
+ * \brief An exception that is thrown when the non-linear solver didn't converge
+ */
+class PCL_EXPORTS SolverDidntConvergeException : public PCLException {
+public:
+ SolverDidntConvergeException(const std::string& error_description,
+ const char* file_name = nullptr,
+ const char* function_name = nullptr,
+ unsigned line_number = 0)
+ : pcl::PCLException(error_description, file_name, function_name, line_number)
+ {}
+};
- /** \class NotEnoughPointsException
- * \brief An exception that is thrown when the number of correspondents is not equal
- * to the minimum required
- */
- class PCL_EXPORTS NotEnoughPointsException : public PCLException
- {
- public:
-
- NotEnoughPointsException (const std::string& error_description,
- const char* file_name = nullptr,
- const char* function_name = nullptr,
- unsigned line_number = 0)
- : pcl::PCLException (error_description, file_name, function_name, line_number) { }
- } ;
-}
+/** \class NotEnoughPointsException
+ * \brief An exception that is thrown when the number of correspondents is not equal
+ * to the minimum required
+ */
+class PCL_EXPORTS NotEnoughPointsException : public PCLException {
+public:
+ NotEnoughPointsException(const std::string& error_description,
+ const char* file_name = nullptr,
+ const char* function_name = nullptr,
+ unsigned line_number = 0)
+ : pcl::PCLException(error_description, file_name, function_name, line_number)
+ {}
+};
+} // namespace pcl
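
For reference, a hedged sketch of how a caller might handle these exceptions around a registration call (not part of this patch; whether they are actually thrown depends on the transformation estimator in use, the Levenberg-Marquardt estimation being one component that reports too few correspondences):

  #include <pcl/point_types.h>
  #include <pcl/registration/exceptions.h>
  #include <pcl/registration/icp.h>

  #include <iostream>

  void
  alignSafely (const pcl::PointCloud<pcl::PointXYZ>::Ptr& src,
               const pcl::PointCloud<pcl::PointXYZ>::Ptr& tgt)
  {
    pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
    icp.setInputSource (src);
    icp.setInputTarget (tgt);

    pcl::PointCloud<pcl::PointXYZ> aligned;
    try {
      icp.align (aligned);
    }
    catch (const pcl::NotEnoughPointsException& e) {
      std::cerr << "Too few correspondences: " << e.what () << '\n';
    }
    catch (const pcl::SolverDidntConvergeException& e) {
      std::cerr << "Non-linear solver did not converge: " << e.what () << '\n';
    }
  }
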
#pragma once
-#include <pcl/registration/icp.h>
#include <pcl/registration/bfgs.h>
+#include <pcl/registration/icp.h>
+
+namespace pcl {
+/** \brief GeneralizedIterativeClosestPoint is an ICP variant that implements the
+ * generalized iterative closest point algorithm as described by Alex Segal et al. in
+ * http://www.robots.ox.ac.uk/~avsegal/resources/papers/Generalized_ICP.pdf
+ * The approach is based on using anisotropic cost functions to optimize the alignment
+ * after closest point assignments have been made.
+ * The original code uses GSL and ANN, while ours uses an Eigen-mapped BFGS and
+ * FLANN.
+ * \author Nizar Sallem
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget>
+class GeneralizedIterativeClosestPoint
+: public IterativeClosestPoint<PointSource, PointTarget> {
+public:
+ using IterativeClosestPoint<PointSource, PointTarget>::reg_name_;
+ using IterativeClosestPoint<PointSource, PointTarget>::getClassName;
+ using IterativeClosestPoint<PointSource, PointTarget>::indices_;
+ using IterativeClosestPoint<PointSource, PointTarget>::target_;
+ using IterativeClosestPoint<PointSource, PointTarget>::input_;
+ using IterativeClosestPoint<PointSource, PointTarget>::tree_;
+ using IterativeClosestPoint<PointSource, PointTarget>::tree_reciprocal_;
+ using IterativeClosestPoint<PointSource, PointTarget>::nr_iterations_;
+ using IterativeClosestPoint<PointSource, PointTarget>::max_iterations_;
+ using IterativeClosestPoint<PointSource, PointTarget>::previous_transformation_;
+ using IterativeClosestPoint<PointSource, PointTarget>::final_transformation_;
+ using IterativeClosestPoint<PointSource, PointTarget>::transformation_;
+ using IterativeClosestPoint<PointSource, PointTarget>::transformation_epsilon_;
+ using IterativeClosestPoint<PointSource, PointTarget>::converged_;
+ using IterativeClosestPoint<PointSource, PointTarget>::corr_dist_threshold_;
+ using IterativeClosestPoint<PointSource, PointTarget>::inlier_threshold_;
+ using IterativeClosestPoint<PointSource, PointTarget>::min_number_correspondences_;
+ using IterativeClosestPoint<PointSource, PointTarget>::update_visualizer_;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+ using MatricesVector =
+ std::vector<Eigen::Matrix3d, Eigen::aligned_allocator<Eigen::Matrix3d>>;
+ using MatricesVectorPtr = shared_ptr<MatricesVector>;
+ using MatricesVectorConstPtr = shared_ptr<const MatricesVector>;
+
+ using InputKdTree = typename Registration<PointSource, PointTarget>::KdTree;
+ using InputKdTreePtr = typename Registration<PointSource, PointTarget>::KdTreePtr;
+
+ using Ptr = shared_ptr<GeneralizedIterativeClosestPoint<PointSource, PointTarget>>;
+ using ConstPtr =
+ shared_ptr<const GeneralizedIterativeClosestPoint<PointSource, PointTarget>>;
+
+ using Vector6d = Eigen::Matrix<double, 6, 1>;
+
+ /** \brief Empty constructor. */
+ GeneralizedIterativeClosestPoint()
+ : k_correspondences_(20)
+ , gicp_epsilon_(0.001)
+ , rotation_epsilon_(2e-3)
+ , mahalanobis_(0)
+ , max_inner_iterations_(20)
+ , translation_gradient_tolerance_(1e-2)
+ , rotation_gradient_tolerance_(1e-2)
+ {
+ min_number_correspondences_ = 4;
+ reg_name_ = "GeneralizedIterativeClosestPoint";
+ max_iterations_ = 200;
+ transformation_epsilon_ = 5e-4;
+ corr_dist_threshold_ = 5.;
+ rigid_transformation_estimation_ = [this](const PointCloudSource& cloud_src,
+ const pcl::Indices& indices_src,
+ const PointCloudTarget& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Eigen::Matrix4f& transformation_matrix) {
+ estimateRigidTransformationBFGS(
+ cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
+ };
+ }
+
+ /** \brief Provide a pointer to the input dataset
+ * \param cloud the const shared pointer to a PointCloud message
+ */
+ inline void
+ setInputSource(const PointCloudSourceConstPtr& cloud) override
+ {
+ if (cloud->points.empty()) {
+ PCL_ERROR(
+ "[pcl::%s::setInputSource] Invalid or empty point cloud dataset given!\n",
+ getClassName().c_str());
+ return;
+ }
+ PointCloudSource input = *cloud;
+ // Set all the point.data[3] values to 1 to aid the rigid transformation
+ for (std::size_t i = 0; i < input.size(); ++i)
+ input[i].data[3] = 1.0;
+
+ pcl::IterativeClosestPoint<PointSource, PointTarget>::setInputSource(cloud);
+ input_covariances_.reset();
+ }
+
+ /** \brief Provide a pointer to the covariances of the input source (if computed
+ * externally!). If not set, GeneralizedIterativeClosestPoint will compute the
+ * covariances itself. Make sure to set the covariances AFTER setting the input source
+ * point cloud (setting the input source point cloud will reset the covariances).
+ * \param[in] covariances the input source covariances
+ */
+ inline void
+ setSourceCovariances(const MatricesVectorPtr& covariances)
+ {
+ input_covariances_ = covariances;
+ }
+
+ /** \brief Provide a pointer to the input target (e.g., the point cloud that we want
+ * to align the input source to) \param[in] target the input point cloud target
+ */
+ inline void
+ setInputTarget(const PointCloudTargetConstPtr& target) override
+ {
+ pcl::IterativeClosestPoint<PointSource, PointTarget>::setInputTarget(target);
+ target_covariances_.reset();
+ }
+
+ /** \brief Provide a pointer to the covariances of the input target (if computed
+ * externally!). If not set, GeneralizedIterativeClosestPoint will compute the
+ * covariances itself. Make sure to set the covariances AFTER setting the input target
+ * point cloud (setting the input target point cloud will reset the covariances).
+ * \param[in] covariances the input target covariances
+ */
+ inline void
+ setTargetCovariances(const MatricesVectorPtr& covariances)
+ {
+ target_covariances_ = covariances;
+ }
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using an iterative non-linear BFGS approach.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformationBFGS(const PointCloudSource& cloud_src,
+ const pcl::Indices& indices_src,
+ const PointCloudTarget& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Eigen::Matrix4f& transformation_matrix);
+
+ /** \return the Mahalanobis distance matrix for the given point index */
+ inline const Eigen::Matrix3d&
+ mahalanobis(std::size_t index) const
+ {
+ assert(index < mahalanobis_.size());
+ return mahalanobis_[index];
+ }
+
+ /** \brief Computes rotation matrix derivative.
+ * The rotation matrix is obtained from the rotation angles x[3], x[4] and x[5].
+ * \return d/d_rx, d/d_ry and d/d_rz respectively in g[3], g[4] and g[5]
+ * \param[in] x array representing the 3D transformation
+ * \param[in] R rotation matrix
+ * \param[out] g gradient vector
+ */
+ void
+ computeRDerivative(const Vector6d& x, const Eigen::Matrix3d& R, Vector6d& g) const;
+
+ /** \brief Set the rotation epsilon (maximum allowable difference between two
+ * consecutive rotations) in order for an optimization to be considered as having
+ * converged to the final solution.
+ * \param epsilon the rotation epsilon
+ */
+ inline void
+ setRotationEpsilon(double epsilon)
+ {
+ rotation_epsilon_ = epsilon;
+ }
+
+ /** \brief Get the rotation epsilon (maximum allowable difference between two
+ * consecutive rotations) as set by the user.
+ */
+ inline double
+ getRotationEpsilon() const
+ {
+ return rotation_epsilon_;
+ }
+
+ /** \brief Set the number of neighbors used when selecting a point neighbourhood
+ * to compute covariances.
+ * A higher value yields more accurate covariance matrices but makes the
+ * covariance computation slower.
+ * \param k the number of neighbors to use when computing covariances
+ */
+ void
+ setCorrespondenceRandomness(int k)
+ {
+ k_correspondences_ = k;
+ }
+
+ /** \brief Get the number of neighbors used when computing covariances as set by
+ * the user
+ */
+ int
+ getCorrespondenceRandomness() const
+ {
+ return k_correspondences_;
+ }
+
+ /** \brief Set maximum number of iterations at the optimization step
+ * \param[in] max maximum number of iterations for the optimizer
+ */
+ void
+ setMaximumOptimizerIterations(int max)
+ {
+ max_inner_iterations_ = max;
+ }
+
+ /** \brief Return maximum number of iterations at the optimization step
+ */
+ int
+ getMaximumOptimizerIterations() const
+ {
+ return max_inner_iterations_;
+ }
+
+ /** \brief Set the minimal translation gradient threshold for early optimization stop
+ * \param[in] tolerance translation gradient threshold in meters
+ */
+ void
+ setTranslationGradientTolerance(double tolerance)
+ {
+ translation_gradient_tolerance_ = tolerance;
+ }
+
+ /** \brief Return the minimal translation gradient threshold for early optimization
+ * stop
+ */
+ double
+ getTranslationGradientTolerance() const
+ {
+ return translation_gradient_tolerance_;
+ }
+
+ /** \brief Set the minimal rotation gradient threshold for early optimization stop
+ * \param[in] tolerance rotation gradient threshold in radians
+ */
+ void
+ setRotationGradientTolerance(double tolerance)
+ {
+ rotation_gradient_tolerance_ = tolerance;
+ }
-namespace pcl
-{
- /** \brief GeneralizedIterativeClosestPoint is an ICP variant that implements the
- * generalized iterative closest point algorithm as described by Alex Segal et al. in
- * http://www.robots.ox.ac.uk/~avsegal/resources/papers/Generalized_ICP.pdf
- * The approach is based on using anistropic cost functions to optimize the alignment
- * after closest point assignments have been made.
- * The original code uses GSL and ANN while in ours we use an eigen mapped BFGS and
- * FLANN.
- * \author Nizar Sallem
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget>
- class GeneralizedIterativeClosestPoint : public IterativeClosestPoint<PointSource, PointTarget>
+ /** \brief Return the minimal rotation gradient threshold for early optimization stop
+ */
+ double
+ getRotationGradientTolerance() const
{
- public:
- using IterativeClosestPoint<PointSource, PointTarget>::reg_name_;
- using IterativeClosestPoint<PointSource, PointTarget>::getClassName;
- using IterativeClosestPoint<PointSource, PointTarget>::indices_;
- using IterativeClosestPoint<PointSource, PointTarget>::target_;
- using IterativeClosestPoint<PointSource, PointTarget>::input_;
- using IterativeClosestPoint<PointSource, PointTarget>::tree_;
- using IterativeClosestPoint<PointSource, PointTarget>::tree_reciprocal_;
- using IterativeClosestPoint<PointSource, PointTarget>::nr_iterations_;
- using IterativeClosestPoint<PointSource, PointTarget>::max_iterations_;
- using IterativeClosestPoint<PointSource, PointTarget>::previous_transformation_;
- using IterativeClosestPoint<PointSource, PointTarget>::final_transformation_;
- using IterativeClosestPoint<PointSource, PointTarget>::transformation_;
- using IterativeClosestPoint<PointSource, PointTarget>::transformation_epsilon_;
- using IterativeClosestPoint<PointSource, PointTarget>::converged_;
- using IterativeClosestPoint<PointSource, PointTarget>::corr_dist_threshold_;
- using IterativeClosestPoint<PointSource, PointTarget>::inlier_threshold_;
- using IterativeClosestPoint<PointSource, PointTarget>::min_number_correspondences_;
- using IterativeClosestPoint<PointSource, PointTarget>::update_visualizer_;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- using MatricesVector = std::vector< Eigen::Matrix3d, Eigen::aligned_allocator<Eigen::Matrix3d> >;
- using MatricesVectorPtr = shared_ptr<MatricesVector>;
- using MatricesVectorConstPtr = shared_ptr<const MatricesVector>;
-
- using InputKdTree = typename Registration<PointSource, PointTarget>::KdTree;
- using InputKdTreePtr = typename Registration<PointSource, PointTarget>::KdTreePtr;
-
- using Ptr = shared_ptr< GeneralizedIterativeClosestPoint<PointSource, PointTarget> >;
- using ConstPtr = shared_ptr< const GeneralizedIterativeClosestPoint<PointSource, PointTarget> >;
-
-
- using Vector6d = Eigen::Matrix<double, 6, 1>;
-
- /** \brief Empty constructor. */
- GeneralizedIterativeClosestPoint ()
- : k_correspondences_(20)
- , gicp_epsilon_(0.001)
- , rotation_epsilon_(2e-3)
- , mahalanobis_(0)
- , max_inner_iterations_(20)
- ,translation_gradient_tolerance_(1e-2)
- ,rotation_gradient_tolerance_(1e-2)
- {
- min_number_correspondences_ = 4;
- reg_name_ = "GeneralizedIterativeClosestPoint";
- max_iterations_ = 200;
- transformation_epsilon_ = 5e-4;
- corr_dist_threshold_ = 5.;
- rigid_transformation_estimation_ = [this] (const PointCloudSource& cloud_src,
- const std::vector<int>& indices_src,
- const PointCloudTarget& cloud_tgt,
- const std::vector<int>& indices_tgt,
- Eigen::Matrix4f& transformation_matrix)
- {
- estimateRigidTransformationBFGS (cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
- };
- }
-
- /** \brief Provide a pointer to the input dataset
- * \param cloud the const boost shared pointer to a PointCloud message
- */
- inline void
- setInputSource (const PointCloudSourceConstPtr &cloud) override
- {
-
- if (cloud->points.empty ())
- {
- PCL_ERROR ("[pcl::%s::setInputSource] Invalid or empty point cloud dataset given!\n", getClassName ().c_str ());
- return;
- }
- PointCloudSource input = *cloud;
- // Set all the point.data[3] values to 1 to aid the rigid transformation
- for (std::size_t i = 0; i < input.size (); ++i)
- input[i].data[3] = 1.0;
-
- pcl::IterativeClosestPoint<PointSource, PointTarget>::setInputSource (cloud);
- input_covariances_.reset ();
- }
-
- /** \brief Provide a pointer to the covariances of the input source (if computed externally!).
- * If not set, GeneralizedIterativeClosestPoint will compute the covariances itself.
- * Make sure to set the covariances AFTER setting the input source point cloud (setting the input source point cloud will reset the covariances).
- * \param[in] covariances the input source covariances
- */
- inline void
- setSourceCovariances (const MatricesVectorPtr& covariances)
- {
- input_covariances_ = covariances;
- }
-
- /** \brief Provide a pointer to the input target (e.g., the point cloud that we want to align the input source to)
- * \param[in] target the input point cloud target
- */
- inline void
- setInputTarget (const PointCloudTargetConstPtr &target) override
- {
- pcl::IterativeClosestPoint<PointSource, PointTarget>::setInputTarget(target);
- target_covariances_.reset ();
- }
-
- /** \brief Provide a pointer to the covariances of the input target (if computed externally!).
- * If not set, GeneralizedIterativeClosestPoint will compute the covariances itself.
- * Make sure to set the covariances AFTER setting the input source point cloud (setting the input source point cloud will reset the covariances).
- * \param[in] covariances the input target covariances
- */
- inline void
- setTargetCovariances (const MatricesVectorPtr& covariances)
- {
- target_covariances_ = covariances;
- }
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using an iterative
- * non-linear Levenberg-Marquardt approach.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformationBFGS (const PointCloudSource &cloud_src,
- const std::vector<int> &indices_src,
- const PointCloudTarget &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Eigen::Matrix4f &transformation_matrix);
-
- /** \brief \return Mahalanobis distance matrix for the given point index */
- inline const Eigen::Matrix3d& mahalanobis(std::size_t index) const
- {
- assert(index < mahalanobis_.size());
- return mahalanobis_[index];
- }
-
- /** \brief Computes rotation matrix derivative.
- * rotation matrix is obtainded from rotation angles x[3], x[4] and x[5]
- * \return d/d_rx, d/d_ry and d/d_rz respectively in g[3], g[4] and g[5]
- * param x array representing 3D transformation
- * param R rotation matrix
- * param g gradient vector
- */
- void
- computeRDerivative(const Vector6d &x, const Eigen::Matrix3d &R, Vector6d &g) const;
-
- /** \brief Set the rotation epsilon (maximum allowable difference between two
- * consecutive rotations) in order for an optimization to be considered as having
- * converged to the final solution.
- * \param epsilon the rotation epsilon
- */
- inline void
- setRotationEpsilon (double epsilon) { rotation_epsilon_ = epsilon; }
-
- /** \brief Get the rotation epsilon (maximum allowable difference between two
- * consecutive rotations) as set by the user.
- */
- inline double
- getRotationEpsilon () const { return rotation_epsilon_; }
-
- /** \brief Set the number of neighbors used when selecting a point neighbourhood
- * to compute covariances.
- * A higher value will bring more accurate covariance matrix but will make
- * covariances computation slower.
- * \param k the number of neighbors to use when computing covariances
- */
- void
- setCorrespondenceRandomness (int k) { k_correspondences_ = k; }
-
- /** \brief Get the number of neighbors used when computing covariances as set by
- * the user
- */
- int
- getCorrespondenceRandomness () const { return k_correspondences_; }
-
- /** \brief Set maximum number of iterations at the optimization step
- * \param[in] max maximum number of iterations for the optimizer
- */
- void
- setMaximumOptimizerIterations (int max) { max_inner_iterations_ = max; }
-
- /** \brief Return maximum number of iterations at the optimization step
- */
- int
- getMaximumOptimizerIterations () const { return max_inner_iterations_; }
-
- /** \brief Set the minimal translation gradient threshold for early optimization stop
- * \param[in] translation gradient threshold in meters
- */
- void
- setTranslationGradientTolerance (double tolerance) { translation_gradient_tolerance_ = tolerance; }
-
- /** \brief Return the minimal translation gradient threshold for early optimization stop
- */
- double
- getTranslationGradientTolerance () const { return translation_gradient_tolerance_; }
-
- /** \brief Set the minimal rotation gradient threshold for early optimization stop
- * \param[in] rotation gradient threshold in radians
- */
- void
- setRotationGradientTolerance (double tolerance) { rotation_gradient_tolerance_ = tolerance; }
-
- /** \brief Return the minimal rotation gradient threshold for early optimization stop
- */
- double
- getRotationGradientTolerance () const { return rotation_gradient_tolerance_; }
-
- protected:
-
- /** \brief The number of neighbors used for covariances computation.
- * default: 20
- */
- int k_correspondences_;
-
- /** \brief The epsilon constant for gicp paper; this is NOT the convergence
- * tolerance
- * default: 0.001
- */
- double gicp_epsilon_;
-
- /** The epsilon constant for rotation error. (In GICP the transformation epsilon
- * is split in rotation part and translation part).
- * default: 2e-3
- */
- double rotation_epsilon_;
-
- /** \brief base transformation */
- Eigen::Matrix4f base_transformation_;
-
- /** \brief Temporary pointer to the source dataset. */
- const PointCloudSource *tmp_src_;
-
- /** \brief Temporary pointer to the target dataset. */
- const PointCloudTarget *tmp_tgt_;
-
- /** \brief Temporary pointer to the source dataset indices. */
- const std::vector<int> *tmp_idx_src_;
-
- /** \brief Temporary pointer to the target dataset indices. */
- const std::vector<int> *tmp_idx_tgt_;
-
- /** \brief Input cloud points covariances. */
- MatricesVectorPtr input_covariances_;
-
- /** \brief Target cloud points covariances. */
- MatricesVectorPtr target_covariances_;
-
- /** \brief Mahalanobis matrices holder. */
- std::vector<Eigen::Matrix3d> mahalanobis_;
-
- /** \brief maximum number of optimizations */
- int max_inner_iterations_;
-
- /** \brief minimal translation gradient for early optimization stop */
- double translation_gradient_tolerance_;
-
- /** \brief minimal rotation gradient for early optimization stop */
- double rotation_gradient_tolerance_;
-
- /** \brief compute points covariances matrices according to the K nearest
- * neighbors. K is set via setCorrespondenceRandomness() method.
- * \param cloud pointer to point cloud
- * \param tree KD tree performer for nearest neighbors search
- * \param[out] cloud_covariances covariances matrices for each point in the cloud
- */
- template<typename PointT>
- void computeCovariances(typename pcl::PointCloud<PointT>::ConstPtr cloud,
- const typename pcl::search::KdTree<PointT>::Ptr tree,
- MatricesVector& cloud_covariances);
-
- /** \return trace of mat1^t . mat2
- * \param mat1 matrix of dimension nxm
- * \param mat2 matrix of dimension nxp
- */
- inline double
- matricesInnerProd(const Eigen::MatrixXd& mat1, const Eigen::MatrixXd& mat2) const
- {
- double r = 0.;
- std::size_t n = mat1.rows();
- // tr(mat1^t.mat2)
- for(std::size_t i = 0; i < n; i++)
- for(std::size_t j = 0; j < n; j++)
- r += mat1 (j, i) * mat2 (i,j);
- return r;
- }
-
- /** \brief Rigid transformation computation method with initial guess.
- * \param output the transformed input point cloud dataset using the rigid transformation found
- * \param guess the initial guess of the transformation to compute
- */
- void
- computeTransformation (PointCloudSource &output, const Eigen::Matrix4f &guess) override;
-
- /** \brief Search for the closest nearest neighbor of a given point.
- * \param query the point to search a nearest neighbour for
- * \param index vector of size 1 to store the index of the nearest neighbour found
- * \param distance vector of size 1 to store the distance to nearest neighbour found
- */
- inline bool
- searchForNeighbors (const PointSource &query, std::vector<int>& index, std::vector<float>& distance)
- {
- int k = tree_->nearestKSearch (query, 1, index, distance);
- if (k == 0)
- return (false);
- return (true);
- }
-
- /// \brief compute transformation matrix from transformation matrix
- void applyState(Eigen::Matrix4f &t, const Vector6d& x) const;
-
- /// \brief optimization functor structure
- struct OptimizationFunctorWithIndices : public BFGSDummyFunctor<double,6>
- {
- OptimizationFunctorWithIndices (const GeneralizedIterativeClosestPoint* gicp)
- : BFGSDummyFunctor<double,6> (), gicp_(gicp) {}
- double operator() (const Vector6d& x) override;
- void df(const Vector6d &x, Vector6d &df) override;
- void fdf(const Vector6d &x, double &f, Vector6d &df) override;
- BFGSSpace::Status checkGradient(const Vector6d& g) override;
-
- const GeneralizedIterativeClosestPoint *gicp_;
- };
-
- std::function<void(const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &src_indices,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &tgt_indices,
- Eigen::Matrix4f &transformation_matrix)> rigid_transformation_estimation_;
+ return rotation_gradient_tolerance_;
+ }
+
+protected:
+ /** \brief The number of neighbors used for covariances computation.
+ * default: 20
+ */
+ int k_correspondences_;
+
+ /** \brief The epsilon constant from the GICP paper; this is NOT the convergence
+ * tolerance.
+ * default: 0.001
+ */
+ double gicp_epsilon_;
+
+ /** \brief The epsilon constant for the rotation error. (In GICP the transformation
+ * epsilon is split into a rotation part and a translation part.)
+ * default: 2e-3
+ */
+ double rotation_epsilon_;
+
+ /** \brief base transformation */
+ Eigen::Matrix4f base_transformation_;
+
+ /** \brief Temporary pointer to the source dataset. */
+ const PointCloudSource* tmp_src_;
+
+ /** \brief Temporary pointer to the target dataset. */
+ const PointCloudTarget* tmp_tgt_;
+
+ /** \brief Temporary pointer to the source dataset indices. */
+ const pcl::Indices* tmp_idx_src_;
+
+ /** \brief Temporary pointer to the target dataset indices. */
+ const pcl::Indices* tmp_idx_tgt_;
+
+ /** \brief Input cloud points covariances. */
+ MatricesVectorPtr input_covariances_;
+
+ /** \brief Target cloud points covariances. */
+ MatricesVectorPtr target_covariances_;
+
+ /** \brief Mahalanobis matrices holder. */
+ std::vector<Eigen::Matrix3d> mahalanobis_;
+
+ /** \brief maximum number of inner optimization iterations */
+ int max_inner_iterations_;
+
+ /** \brief minimal translation gradient for early optimization stop */
+ double translation_gradient_tolerance_;
+
+ /** \brief minimal rotation gradient for early optimization stop */
+ double rotation_gradient_tolerance_;
+
+ /** \brief Compute the covariance matrix of each point from its K nearest
+ * neighbors. K is set via the setCorrespondenceRandomness() method.
+ * \param cloud pointer to point cloud
+ * \param tree KD tree performer for nearest neighbors search
+ * \param[out] cloud_covariances covariances matrices for each point in the cloud
+ */
+ template <typename PointT>
+ void
+ computeCovariances(typename pcl::PointCloud<PointT>::ConstPtr cloud,
+ const typename pcl::search::KdTree<PointT>::Ptr tree,
+ MatricesVector& cloud_covariances);
+
+ /** \return trace of mat1^t . mat2
+ * \param mat1 matrix of dimension nxm
+ * \param mat2 matrix of dimension nxp
+ */
+ inline double
+ matricesInnerProd(const Eigen::MatrixXd& mat1, const Eigen::MatrixXd& mat2) const
+ {
+ double r = 0.;
+ std::size_t n = mat1.rows();
+ // tr(mat1^t.mat2)
+ for (std::size_t i = 0; i < n; i++)
+ for (std::size_t j = 0; j < n; j++)
+ r += mat1(j, i) * mat2(i, j);
+ return r;
+ }
+
+ /** \brief Rigid transformation computation method with initial guess.
+ * \param output the transformed input point cloud dataset using the rigid
+ * transformation found
+ * \param guess the initial guess of the transformation to compute
+ */
+ void
+ computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess) override;
+
+ /** \brief Search for the nearest neighbor of a given point.
+ * \param query the point to search a nearest neighbour for
+ * \param index vector of size 1 to store the index of the nearest neighbour found
+ * \param distance vector of size 1 to store the distance to nearest neighbour found
+ */
+ inline bool
+ searchForNeighbors(const PointSource& query,
+ pcl::Indices& index,
+ std::vector<float>& distance)
+ {
+ int k = tree_->nearestKSearch(query, 1, index, distance);
+ if (k == 0)
+ return (false);
+ return (true);
+ }
+
+ /// \brief Apply the transformation encoded by the 6D state vector x to the transformation matrix t
+ void
+ applyState(Eigen::Matrix4f& t, const Vector6d& x) const;
+
+ /// \brief optimization functor structure
+ struct OptimizationFunctorWithIndices : public BFGSDummyFunctor<double, 6> {
+ OptimizationFunctorWithIndices(const GeneralizedIterativeClosestPoint* gicp)
+ : BFGSDummyFunctor<double, 6>(), gicp_(gicp)
+ {}
+ double
+ operator()(const Vector6d& x) override;
+ void
+ df(const Vector6d& x, Vector6d& df) override;
+ void
+ fdf(const Vector6d& x, double& f, Vector6d& df) override;
+ BFGSSpace::Status
+ checkGradient(const Vector6d& g) override;
+
+ const GeneralizedIterativeClosestPoint* gicp_;
};
-}
+
+ std::function<void(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& src_indices,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& tgt_indices,
+ Eigen::Matrix4f& transformation_matrix)>
+ rigid_transformation_estimation_;
+};
+} // namespace pcl
#include <pcl/registration/impl/gicp.hpp>
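
For reference, a typical call sequence for the class above (a sketch, not part of this patch; the threshold values are illustrative rather than tuned):

  #include <pcl/point_types.h>
  #include <pcl/registration/gicp.h>

  void
  alignGICP (const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
             const pcl::PointCloud<pcl::PointXYZ>::Ptr& target,
             pcl::PointCloud<pcl::PointXYZ>& aligned)
  {
    pcl::GeneralizedIterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> gicp;
    gicp.setInputSource (source);
    gicp.setInputTarget (target);

    gicp.setCorrespondenceRandomness (20);    // neighbors per covariance estimate
    gicp.setMaximumOptimizerIterations (20);  // inner BFGS iterations
    gicp.setRotationEpsilon (2e-3);           // rotation convergence criterion
    gicp.setMaxCorrespondenceDistance (5.0);  // inherited from Registration

    gicp.align (aligned);
    if (gicp.hasConverged ())
    {
      const Eigen::Matrix4f transform = gicp.getFinalTransformation ();
      (void)transform;  // use the estimated rigid transformation here
    }
  }
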
#pragma once
+#include <pcl/kdtree/impl/kdtree_flann.hpp>
+#include <pcl/registration/gicp.h>
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
-#include <pcl/point_types.h>
+#include <pcl/pcl_exports.h> // for PCL_EXPORTS
#include <pcl/point_cloud.h>
#include <pcl/point_representation.h>
-#include <pcl/kdtree/impl/kdtree_flann.hpp>
-#include <pcl/registration/gicp.h>
+#include <pcl/point_types.h>
-namespace pcl
-{
- struct EIGEN_ALIGN16 _PointXYZLAB
- {
- PCL_ADD_POINT4D; // this adds the members x,y,z
- union
- {
- struct
- {
- float L;
- float a;
- float b;
- };
- float data_lab[4];
- };
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+namespace pcl {
+/** \brief GeneralizedIterativeClosestPoint6D integrates L*a*b* color space information
+ * into the Generalized Iterative Closest Point (GICP) algorithm.
+ *
+ * The suggested input is PointXYZRGBA.
+ *
+ * \note If you use this code in any academic work, please cite:
+ *
+ * - M. Korn, M. Holzkothen, J. Pauli
+ * Color Supported Generalized-ICP.
+ * In Proceedings of VISAPP 2014 - International Conference on Computer Vision Theory
+ * and Applications, Lisbon, Portugal, January 2014.
+ *
+ * \author Martin Holzkothen, Michael Korn
+ * \ingroup registration
+ */
+class PCL_EXPORTS GeneralizedIterativeClosestPoint6D
+: public GeneralizedIterativeClosestPoint<PointXYZRGBA, PointXYZRGBA> {
+ using PointSource = PointXYZRGBA;
+ using PointTarget = PointXYZRGBA;
- /** \brief A custom point type for position and CIELAB color value */
- struct PointXYZLAB : public _PointXYZLAB
- {
- inline PointXYZLAB ()
- {
- x = y = z = 0.0f; data[3] = 1.0f; // important for homogeneous coordinates
- L = a = b = 0.0f; data_lab[3] = 0.0f;
- }
- };
-}
-
-// register the custom point type in PCL
-POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::_PointXYZLAB,
- (float, x, x)
- (float, y, y)
- (float, z, z)
- (float, L, L)
- (float, a, a)
- (float, b, b)
-)
-POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::PointXYZLAB, pcl::_PointXYZLAB)
-
-namespace pcl
-{
- /** \brief GeneralizedIterativeClosestPoint6D integrates L*a*b* color space information into the
- * Generalized Iterative Closest Point (GICP) algorithm.
- *
- * The suggested input is PointXYZRGBA.
+public:
+ /** \brief constructor.
*
- * \note If you use this code in any academic work, please cite:
+ * \param[in] lab_weight the color weight
+ */
+ GeneralizedIterativeClosestPoint6D(float lab_weight = 0.032f);
+
+ /** \brief Provide a pointer to the input source
+ * (e.g., the point cloud that we want to align to the target)
*
- * - M. Korn, M. Holzkothen, J. Pauli
- * Color Supported Generalized-ICP.
- * In Proceedings of VISAPP 2014 - International Conference on Computer Vision Theory and Applications,
- * Lisbon, Portugal, January 2014.
+ * \param[in] cloud the input point cloud source
+ */
+ void
+ setInputSource(const PointCloudSourceConstPtr& cloud) override;
+
+ /** \brief Provide a pointer to the input target
+ * (e.g., the point cloud that we want to align the input source to)
*
- * \author Martin Holzkothen, Michael Korn
- * \ingroup registration
+ * \param[in] target the input point cloud target
+ */
+ void
+ setInputTarget(const PointCloudTargetConstPtr& target) override;
+
+protected:
+ /** \brief Rigid transformation computation method with initial guess.
+ * \param output the transformed input point cloud dataset using the rigid
+ * transformation found
+ * \param guess the initial guess of the transformation to compute
+ */
+ void
+ computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess) override;
+
+ /** \brief Search for the nearest neighbor of a given point.
+ * \param query the point to search a nearest neighbour for
+ * \param index vector of size 1 to store the index of the nearest neighbour found
+ * \param distance vector of size 1 to store the distance to nearest neighbour found
*/
- class PCL_EXPORTS GeneralizedIterativeClosestPoint6D : public GeneralizedIterativeClosestPoint<PointXYZRGBA, PointXYZRGBA>
- {
- using PointSource = PointXYZRGBA;
- using PointTarget = PointXYZRGBA;
-
- public:
-
- /** \brief constructor.
- *
- * \param[in] lab_weight the color weight
- */
- GeneralizedIterativeClosestPoint6D (float lab_weight = 0.032f);
-
- /** \brief Provide a pointer to the input source
- * (e.g., the point cloud that we want to align to the target)
- *
- * \param[in] cloud the input point cloud source
- */
- void
- setInputSource (const PointCloudSourceConstPtr& cloud) override;
-
- /** \brief Provide a pointer to the input target
- * (e.g., the point cloud that we want to align the input source to)
- *
- * \param[in] cloud the input point cloud target
- */
- void
- setInputTarget (const PointCloudTargetConstPtr& target) override;
-
- protected:
-
- /** \brief Rigid transformation computation method with initial guess.
- * \param output the transformed input point cloud dataset using the rigid transformation found
- * \param guess the initial guess of the transformation to compute
- */
- void
- computeTransformation (PointCloudSource& output,
- const Eigen::Matrix4f& guess) override;
-
- /** \brief Search for the closest nearest neighbor of a given point.
- * \param query the point to search a nearest neighbour for
- * \param index vector of size 1 to store the index of the nearest neighbour found
- * \param distance vector of size 1 to store the distance to nearest neighbour found
- */
- inline bool
- searchForNeighbors (const PointXYZLAB& query, std::vector<int>& index, std::vector<float>& distance);
-
- protected:
- /** \brief Holds the converted (LAB) data cloud. */
- pcl::PointCloud<PointXYZLAB>::Ptr cloud_lab_;
-
- /** \brief Holds the converted (LAB) model cloud. */
- pcl::PointCloud<PointXYZLAB>::Ptr target_lab_;
-
- /** \brief 6d-tree to search in model cloud. */
- KdTreeFLANN<PointXYZLAB> target_tree_lab_;
-
- /** \brief The color weight. */
- float lab_weight_;
-
- /** \brief Custom point representation to perform kdtree searches in more than 3 (i.e. in all 6) dimensions. */
- class MyPointRepresentation : public PointRepresentation<PointXYZLAB>
- {
- using PointRepresentation<PointXYZLAB>::nr_dimensions_;
- using PointRepresentation<PointXYZLAB>::trivial_;
-
- public:
- using Ptr = shared_ptr<MyPointRepresentation>;
- using ConstPtr = shared_ptr<const MyPointRepresentation>;
-
- MyPointRepresentation ()
- {
- nr_dimensions_ = 6;
- trivial_ = false;
- }
-
-
- ~MyPointRepresentation ()
- {
- }
-
- inline Ptr
- makeShared () const
- {
- return Ptr (new MyPointRepresentation (*this));
- }
-
- void
- copyToFloatArray (const PointXYZLAB &p, float * out) const override
- {
- // copy all of the six values
- out[0] = p.x;
- out[1] = p.y;
- out[2] = p.z;
- out[3] = p.L;
- out[4] = p.a;
- out[5] = p.b;
- }
- };
-
- /** \brief Enables 6d searches with kd-tree class using the color weight. */
- MyPointRepresentation point_rep_;
+ inline bool
+ searchForNeighbors(const PointXYZLAB& query,
+ pcl::Indices& index,
+ std::vector<float>& distance);
+
+protected:
+ /** \brief Holds the converted (LAB) data cloud. */
+ pcl::PointCloud<PointXYZLAB>::Ptr cloud_lab_;
+
+ /** \brief Holds the converted (LAB) model cloud. */
+ pcl::PointCloud<PointXYZLAB>::Ptr target_lab_;
+
+ /** \brief 6d-tree to search in model cloud. */
+ KdTreeFLANN<PointXYZLAB> target_tree_lab_;
+
+ /** \brief The color weight. */
+ float lab_weight_;
+
+ /** \brief Custom point representation to perform kdtree searches in more than 3
+ * (i.e. in all 6) dimensions. */
+ class MyPointRepresentation : public PointRepresentation<PointXYZLAB> {
+ using PointRepresentation<PointXYZLAB>::nr_dimensions_;
+ using PointRepresentation<PointXYZLAB>::trivial_;
+
+ public:
+ using Ptr = shared_ptr<MyPointRepresentation>;
+ using ConstPtr = shared_ptr<const MyPointRepresentation>;
+
+ MyPointRepresentation()
+ {
+ nr_dimensions_ = 6;
+ trivial_ = false;
+ }
+
+ ~MyPointRepresentation() {}
+
+ inline Ptr
+ makeShared() const
+ {
+ return Ptr(new MyPointRepresentation(*this));
+ }
+
+ void
+ copyToFloatArray(const PointXYZLAB& p, float* out) const override
+ {
+ // copy all of the six values
+ out[0] = p.x;
+ out[1] = p.y;
+ out[2] = p.z;
+ out[3] = p.L;
+ out[4] = p.a;
+ out[5] = p.b;
+ }
};
-}
+
+ /** \brief Enables 6d searches with kd-tree class using the color weight. */
+ MyPointRepresentation point_rep_;
+};
+} // namespace pcl
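
For reference, a short sketch of using the color-supported variant above (not part of this patch; the lab_weight value simply restates the declared default):

  #include <pcl/point_types.h>
  #include <pcl/registration/gicp6d.h>

  void
  alignColorGICP (const pcl::PointCloud<pcl::PointXYZRGBA>::Ptr& source,
                  const pcl::PointCloud<pcl::PointXYZRGBA>::Ptr& target,
                  pcl::PointCloud<pcl::PointXYZRGBA>& aligned)
  {
    // lab_weight balances the geometric distance against the L*a*b* color distance.
    pcl::GeneralizedIterativeClosestPoint6D gicp6d (0.032f);
    gicp6d.setInputSource (source);  // converted to XYZ + L*a*b* internally
    gicp6d.setInputTarget (target);
    gicp6d.align (aligned);
  }
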
#pragma once
-#include <pcl/registration/vertex_estimates.h>
#include <pcl/registration/edge_measurements.h>
+#include <pcl/registration/vertex_estimates.h>
#include <pcl/exceptions.h>
+
#include "boost/graph/graph_traits.hpp"
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** \brief @b GraphHandler class is a wrapper for a general SLAM graph.
+ * The actual graph class must fulfill the following boost::graph concepts:
+ * - BidirectionalGraph
+ * - AdjacencyGraph
+ * - VertexAndEdgeListGraph
+ * - MutableGraph
+ *
+ * Other valid expressions:
+ * - add_edge (m,g) add a new edge according to the given measurement. Return
+ * type: std::pair<edge_descriptor, bool>
+ * - add_vertex (e,g) add a new vertex according to the given estimate. Return
+ * type: vertex_descriptor
+ * - get_pose (v,g) retrieve the pose estimate for v, if any. Return type:
+ * Eigen::Matrix4f
+ * - get_cloud (v,g) retrieve the cloud pointer associated to v, if any. Return
+ * type: pcl::PointCloud<PointT>::ConstPtr
+ * - set_estimate (v,e,g) set the estimate for an existing vertex. Return type: void.
+ * - set_measurement (d,m,g) set the measurement for an existing edge. Return type:
+ * void.
+ *
+ * Notation:
+ * - m an edge measurement
+ * - e a vertex estimate
+ * - v a vertex
+ * - d an edge
+ * - g a graph
+ *
+ * A valid graph implementation should accept at least the PoseEstimate estimate and
+ * the PoseMeasurement measurement.
+ *
+ * If a specific graph implementation needs initialization and/or finalization,
+ * specialize the protected methods init() and deinit() for your graph type.
+ *
+ * \author Nicola Fioraio
+ * \ingroup registration
+ */
+template <typename GraphT>
+class GraphHandler {
+public:
+ using Ptr = shared_ptr<GraphHandler<GraphT>>;
+ using ConstPtr = shared_ptr<const GraphHandler<GraphT>>;
+ using GraphPtr = shared_ptr<GraphT>;
+ using GraphConstPtr = shared_ptr<const GraphT>;
+
+ using Vertex = typename boost::graph_traits<GraphT>::vertex_descriptor;
+ using Edge = typename boost::graph_traits<GraphT>::edge_descriptor;
+
+ /** \brief Empty constructor. */
+ GraphHandler() : graph_impl_(new GraphT())
+ {
+ if (!init())
+ throw InitFailedException("Graph Initialization Failed",
+ __FILE__,
+ "pcl::registration::GraphHandler::GraphHandler ()",
+ __LINE__);
+ }
+
+ /** \brief Destructor. */
+ ~GraphHandler() { deinit(); }
+
+ /** \brief Clear the graph */
+ inline void
+ clear()
+ {
+ deinit();
+ graph_impl_.reset(new GraphT());
+ if (!init())
+ throw InitFailedException("Graph Initialization Failed",
+ __FILE__,
+ "pcl::registration::GraphHandler::clear ()",
+ __LINE__);
+ }
+
+ /** \brief Get a pointer to the BGL graph */
+ inline GraphConstPtr
+ getGraph() const
+ {
+ return graph_impl_;
+ }
+
+ /** \brief Get a pointer to the BGL graph */
+ inline GraphPtr
+ getGraph()
+ {
+ return graph_impl_;
+ }
+
+ /** \brief Add a new point cloud to the graph and return the new vertex
+ * \param cloud the new point cloud
+ * \param pose the pose estimate
+ * \return a reference to the new vertex
+ */
+ template <class PointT>
+ inline Vertex
+ addPointCloud(const typename pcl::PointCloud<PointT>::ConstPtr& cloud,
+ const Eigen::Matrix4f& pose)
+ {
+ return add_vertex(PoseEstimate<PointT>(cloud, pose), *graph_impl_);
+ }
+
+ /** \brief Add a new generic vertex created according to the given estimate
+ * \param estimate the parameters' estimate
+ * \return a reference to the new vertex
+ */
+ template <class EstimateT>
+ inline Vertex
+ addGenericVertex(const EstimateT& estimate)
+ {
+ return add_vertex(estimate, *graph_impl_);
+ }
+
+ /** \brief Add a new constraint between two poses
+ * \param v_start the first pose
+ * \param v_end the second pose
+ * \param relative_transformation the transformation from v_start to v_end
+ * \param information_matrix the uncertainty
+ * \return a reference to the new edge
+ */
+ template <class InformationT>
+ inline Edge
+ addPoseConstraint(const Vertex& v_start,
+ const Vertex& v_end,
+ const Eigen::Matrix4f& relative_transformation,
+ const InformationT& information_matrix)
+ {
+ return add_edge(PoseMeasurement<Vertex, InformationT>(
+ v_start, v_end, relative_transformation, information_matrix),
+ *graph_impl_);
+ }
+
+ /** \brief Add a generic constraint created according to the given measurement
+ * \param measurement the measurement
+ * \return a reference to the new edge
+ */
+ template <class MeasurementT>
+ inline Edge
+ addGenericConstraint(const MeasurementT& measurement)
+ {
+ return add_edge(measurement, *graph_impl_);
+ }
+
+ /** \brief Remove a vertex from the graph
+ * \param v the vertex
+ */
+ inline void
+ removeVertex(const Vertex& v)
{
- /** \brief @b GraphHandler class is a wrapper for a general SLAM graph
- * The actual graph class must fulfill the following boost::graph concepts:
- * - BidirectionalGraph
- * - AdjacencyGraph
- * - VertexAndEdgeListGraph
- * - MutableGraph
- *
- * Other valid expressions:
- * - add_edge (m,g) add a new edge according to the given measurement. Return type: std::pair<edge_descriptor, bool>
- * - add_vertex (e,g) add a new vertex according to the given estimate. Return type: vertex_descriptor
- * - get_pose (v,g) retrieve the pose estimate for v, if any. Return type: Eigen::Matrix4f
- * - get_cloud (v,g) retrieve the cloud pointer associated to v, if any. Return type: pcl::PointCloud<PointT>::ConstPtr
- * - set_estimate (v,e,g) set the estimate for an existing vertex. Return type: void.
- * - set_measurement (d,m,g) set the measurement for an existing edge. Return type: void.
- * Notation:
- * - m an edge measurement
- * - e a vertex estimate
- * - v a vertex
- * - d an edge
- * - g a graph
- * A valid graph implementation should accept at least the PoseEstimate estimate and the PoseMeasurement measurement
- *
- * If a specific graph implementation needs initialization and/or finalization,
- * specialize the protected methods init() and deinit() for your graph type
- *
- * \author Nicola Fioraio
- * \ingroup registration
- */
- template <typename GraphT>
- class GraphHandler
- {
- public:
- using Ptr = shared_ptr<GraphHandler<GraphT> >;
- using ConstPtr = shared_ptr<const GraphHandler<GraphT> >;
- using GraphPtr = shared_ptr<GraphT>;
- using GraphConstPtr = shared_ptr<const GraphT>;
-
- using Vertex = typename boost::graph_traits<GraphT>::vertex_descriptor;
- using Edge = typename boost::graph_traits<GraphT>::edge_descriptor;
-
- /** \brief Empty constructor. */
- GraphHandler () : graph_impl_ (new GraphT ())
- {
- if (!init ())
- throw InitFailedException ("Graph Initialization Failed", __FILE__, "pcl::registration::GraphHandler::GraphHandler ()", __LINE__);
- }
-
- /** \brief Destructor. */
- ~GraphHandler ()
- {
- deinit ();
- }
-
- /** \brief Clear the graph */
- inline void
- clear ()
- {
- deinit ();
- graph_impl_.reset (new GraphT ());
- if (!init ())
- throw InitFailedException ("Graph Initialization Failed", __FILE__, "pcl::registration::GraphHandler::clear ()", __LINE__);
- }
-
- /** \brief Get a pointer to the BGL graph */
- inline GraphConstPtr
- getGraph () const
- {
- return graph_impl_;
- }
-
- /** \brief Get a pointer to the BGL graph */
- inline GraphPtr
- getGraph ()
- {
- return graph_impl_;
- }
-
- /** \brief Add a new point cloud to the graph and return the new vertex
- * \param cloud the new point cloud
- * \param pose the pose estimate
- * \return a reference to the new vertex
- */
- template <class PointT> inline Vertex
- addPointCloud (const typename pcl::PointCloud<PointT>::ConstPtr& cloud, const Eigen::Matrix4f& pose)
- {
- return add_vertex (PoseEstimate<PointT> (cloud, pose), *graph_impl_);
- }
-
- /** \brief Add a new generic vertex created according to the given estimate
- * \param estimate the parameters' estimate
- * \return a reference to the new vertex
- */
- template <class EstimateT> inline Vertex
- addGenericVertex (const EstimateT& estimate)
- {
- return add_vertex (estimate, *graph_impl_);
- }
-
- /** \brief Add a new constraint between two poses
- * \param v_start the first pose
- * \param v_end the second pose
- * \param relative_transformation the transformation from v_start to v_end
- * \param information_matrix the uncertainty
- * \return a reference to the new edge
- */
- template <class InformationT> inline Edge
- addPoseConstraint ( const Vertex& v_start, const Vertex& v_end,
- const Eigen::Matrix4f& relative_transformation,
- const InformationT& information_matrix)
- {
- return add_edge ( PoseMeasurement<Vertex, InformationT> (v_start, v_end, relative_transformation, information_matrix),
- *graph_impl_);
- }
-
- /** \brief Add a generic constraint created according to the given measurement
- * \param measurement the measurement
- * \return a reference to the new edge
- */
- template <class MeasurementT> inline Edge
- addGenericConstraint (const MeasurementT& measurement)
- {
- return add_edge (measurement, *graph_impl_);
- }
-
- /** \brief Remove a vertex from the graph
- * \param v the vertex
- */
- inline void
- removeVertex (const Vertex& v)
- {
- remove_vertex (v.v_, *graph_impl_);
- }
-
- /** \brief Remove a constraint from the graph
- * \param e the edge
- */
- inline void
- removeConstraint (const Edge& e)
- {
- remove_edge(e.e_, *graph_impl_);
- }
-
- protected:
- /** \brief This method is called right after the creation of graph_impl_ */
- inline bool
- init ()
- {
- return true;
- }
-
- /** \brief This method is called when graph_impl_ is going to be destroyed */
- inline bool
- deinit ()
- {
- return true;
- }
-
- private:
- GraphPtr graph_impl_;
- };
+ remove_vertex(v.v_, *graph_impl_);
}
-}
+
+ /** \brief Remove a constraint from the graph
+ * \param e the edge
+ */
+ inline void
+ removeConstraint(const Edge& e)
+ {
+ remove_edge(e.e_, *graph_impl_);
+ }
+
+protected:
+ /** \brief This method is called right after the creation of graph_impl_ */
+ inline bool
+ init()
+ {
+ return true;
+ }
+
+ /** \brief This method is called when graph_impl_ is going to be destroyed */
+ inline bool
+ deinit()
+ {
+ return true;
+ }
+
+private:
+ GraphPtr graph_impl_;
+};
+} // namespace registration
+} // namespace pcl
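
For reference, a sketch of the wrapper API above (not part of this patch; MyGraph is a hypothetical graph type assumed to model the listed boost::graph concepts and expressions, and the information matrix type is illustrative):

  #include <pcl/point_cloud.h>
  #include <pcl/point_types.h>
  #include <pcl/registration/graph_handler.h>

  template <typename MyGraph>
  void
  buildGraph (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& scan0,
              const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& scan1,
              const Eigen::Matrix4f& pose0,
              const Eigen::Matrix4f& pose1,
              const Eigen::Matrix4f& relative_transform,
              const Eigen::Matrix<float, 6, 6>& information)
  {
    // Throws InitFailedException if init () fails for the chosen graph type.
    pcl::registration::GraphHandler<MyGraph> handler;

    // One vertex per scan, carrying the cloud and its pose estimate.
    const auto v0 = handler.template addPointCloud<pcl::PointXYZ> (scan0, pose0);
    const auto v1 = handler.template addPointCloud<pcl::PointXYZ> (scan1, pose1);

    // One edge encoding the measured relative transformation and its certainty.
    handler.addPoseConstraint (v0, v1, relative_transform, information);
  }
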
#include <pcl/registration/graph_handler.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b GraphOptimizer class; derive and specialize for each graph type
- * \author Nicola Fioraio
- * \ingroup registration
- */
- template <typename GraphT>
- class GraphOptimizer
- {
- public:
- /** \brief Optimize the given graph
- * \param inout_graph the graph
- * \return false if the optimization has not been performed
- */
- virtual inline bool
- optimize(GraphHandler<GraphT>& inout_graph) = 0;
-
- /** \brief Empty destructor */
- virtual ~GraphOptimizer () {}
- };
- }
-}
+namespace pcl {
+namespace registration {
+/** \brief @b GraphOptimizer class; derive and specialize for each graph type
+ * \author Nicola Fioraio
+ * \ingroup registration
+ */
+template <typename GraphT>
+class GraphOptimizer {
+public:
+ /** \brief Optimize the given graph
+ * \param inout_graph the graph
+ * \return false if the optimization has not been performed
+ */
+ virtual inline bool
+ optimize(GraphHandler<GraphT>& inout_graph) = 0;
+
+ /** \brief Empty destructor */
+ virtual ~GraphOptimizer() {}
+};
+} // namespace registration
+} // namespace pcl
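
For reference, a minimal do-nothing specialization illustrating the interface above (a sketch, not part of this patch; a real optimizer would wrap an actual SLAM back end and update the vertex estimates):

  #include <pcl/registration/graph_optimizer.h>

  template <typename GraphT>
  class NullGraphOptimizer : public pcl::registration::GraphOptimizer<GraphT> {
  public:
    bool
    optimize (pcl::registration::GraphHandler<GraphT>& inout_graph) override
    {
      (void)inout_graph;  // nothing to optimize in this placeholder
      return false;       // contract: false means no optimization was performed
    }
  };
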
#pragma once
-#include <pcl/point_cloud.h>
#include <pcl/registration/graph_handler.h>
+#include <pcl/point_cloud.h>
-namespace pcl
-{
- /** \brief @b GraphRegistration class is the base class for graph-based registration methods
- * \author Nicola Fioraio
- * \ingroup registration
- */
- template <typename GraphT>
- class GraphRegistration
- {
- public:
- using GraphHandler = pcl::registration::GraphHandler<GraphT>;
- using GraphHandlerPtr = typename pcl::registration::GraphHandler<GraphT>::Ptr;
- using GraphHandlerConstPtr = typename pcl::registration::GraphHandler<GraphT>::ConstPtr;
- using GraphHandlerVertex = typename pcl::registration::GraphHandler<GraphT>::Vertex;
+namespace pcl {
+/** \brief @b GraphRegistration class is the base class for graph-based registration
+ * methods.
+ * \author Nicola Fioraio
+ * \ingroup registration
+ */
+template <typename GraphT>
+class GraphRegistration {
+public:
+ using GraphHandler = pcl::registration::GraphHandler<GraphT>;
+ using GraphHandlerPtr = typename pcl::registration::GraphHandler<GraphT>::Ptr;
+ using GraphHandlerConstPtr =
+ typename pcl::registration::GraphHandler<GraphT>::ConstPtr;
+ using GraphHandlerVertex = typename pcl::registration::GraphHandler<GraphT>::Vertex;
+
+ /** \brief Empty constructor */
+ GraphRegistration()
+ : graph_handler_(new GraphHandler)
+ , last_aligned_vertex_(boost::graph_traits<GraphT>::null_vertex())
+ , last_vertices_()
+ {}
- /** \brief Empty constructor */
- GraphRegistration () : graph_handler_ (new GraphHandler),
- last_aligned_vertex_ (boost::graph_traits<GraphT>::null_vertex ()),
- last_vertices_ ()
- {}
-
- /** \brief Empty destructor */
- virtual ~GraphRegistration () {}
+ /** \brief Empty destructor */
+ virtual ~GraphRegistration() {}
- /** \brief Add a point cloud and the associated camera pose to the graph */
- template <typename PointT> inline void
- addPointCloud (const typename pcl::PointCloud<PointT>::ConstPtr& cloud, const Eigen::Matrix4f& pose)
- {
- last_vertices_.push_back (graph_handler_->addPointCloud (cloud, pose));
- }
+ /** \brief Add a point cloud and the associated camera pose to the graph */
+ template <typename PointT>
+ inline void
+ addPointCloud(const typename pcl::PointCloud<PointT>::ConstPtr& cloud,
+ const Eigen::Matrix4f& pose)
+ {
+ last_vertices_.push_back(graph_handler_->addPointCloud(cloud, pose));
+ }
- /** \brief Set the graph handler */
- inline void
- setGraphHandler (GraphHandlerPtr& gh)
- {
- graph_handler_ = gh;
- }
+ /** \brief Set the graph handler */
+ inline void
+ setGraphHandler(GraphHandlerPtr& gh)
+ {
+ graph_handler_ = gh;
+ }
- /** \brief Get a pointer to the graph handler */
- inline GraphHandlerPtr
- getGraphHandler ()
- {
- return graph_handler_;
- }
+ /** \brief Get a pointer to the graph handler */
+ inline GraphHandlerPtr
+ getGraphHandler()
+ {
+ return graph_handler_;
+ }
- /** \brief Get a pointer to the graph handler */
- inline GraphHandlerConstPtr
- getGraphHandler () const
- {
- return graph_handler_;
- }
+ /** \brief Get a pointer to the graph handler */
+ inline GraphHandlerConstPtr
+ getGraphHandler() const
+ {
+ return graph_handler_;
+ }
- /** \brief Check if new poses have been added, then call the registration
- * method which is implemented by the subclasses
- */
- inline void
- compute ()
- {
- if (last_vertices_.empty ())
- return;
- computeRegistration ();
- last_aligned_vertex_ = last_vertices_.back ();
- last_vertices_.clear ();
- }
+ /** \brief Check if new poses have been added, then call the registration
+ * method which is implemented by the subclasses
+ */
+ inline void
+ compute()
+ {
+ if (last_vertices_.empty())
+ return;
+ computeRegistration();
+ last_aligned_vertex_ = last_vertices_.back();
+ last_vertices_.clear();
+ }
- protected:
- /** \brief The graph handler */
- GraphHandlerPtr graph_handler_;
- /** \brief The last estimated pose */
- GraphHandlerVertex last_aligned_vertex_;
- /** \brief The vertices added to the graph since the last call to compute */
- std::vector<GraphHandlerVertex> last_vertices_;
+protected:
+ /** \brief The graph handler */
+ GraphHandlerPtr graph_handler_;
+ /** \brief The last estimated pose */
+ GraphHandlerVertex last_aligned_vertex_;
+ /** \brief The vertices added to the graph since the last call to compute */
+ std::vector<GraphHandlerVertex> last_vertices_;
- private:
- /** \brief The registration method */
- virtual void
- computeRegistration () = 0;
- };
-}
+private:
+ /** \brief The registration method */
+ virtual void
+ computeRegistration() = 0;
+};
+} // namespace pcl
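Hedged usage sketch for the GraphRegistration interface above. MyGraphRegistration is a hypothetical concrete subclass (anything implementing computeRegistration() would do), and cloud_a / cloud_b stand for pcl::PointCloud<pcl::PointXYZ>::ConstPtr clouds loaded elsewhere; only calls declared in this header are used.

MyGraphRegistration reg;  // hypothetical subclass implementing computeRegistration()
reg.addPointCloud<pcl::PointXYZ>(cloud_a, Eigen::Matrix4f::Identity());
reg.addPointCloud<pcl::PointXYZ>(cloud_b, Eigen::Matrix4f::Identity());
reg.compute();  // returns immediately unless new vertices were added since the last call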
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#pragma once
#include <pcl/common/common.h>
-#include <pcl/registration/registration.h>
#include <pcl/registration/matching_candidate.h>
+#include <pcl/registration/registration.h>
-namespace pcl
-{
- /** \brief Compute the mean point density of a given point cloud.
- * \param[in] cloud pointer to the input point cloud
- * \param[in] max_dist maximum distance of a point to be considered as a neighbor
- * \param[in] nr_threads number of threads to use (default = 1, only used if OpenMP flag is set)
- * \return the mean point density of a given point cloud
- */
- template <typename PointT> inline float
- getMeanPointDensity (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, float max_dist, int nr_threads = 1);
-
- /** \brief Compute the mean point density of a given point cloud.
- * \param[in] cloud pointer to the input point cloud
- * \param[in] indices the vector of point indices to use from \a cloud
- * \param[in] max_dist maximum distance of a point to be considered as a neighbor
- * \param[in] nr_threads number of threads to use (default = 1, only used if OpenMP flag is set)
- * \return the mean point density of a given point cloud
- */
- template <typename PointT> inline float
- getMeanPointDensity (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, const std::vector <int> &indices,
- float max_dist, int nr_threads = 1);
-
-
- namespace registration
+namespace pcl {
+/** \brief Compute the mean point density of a given point cloud.
+ * \param[in] cloud pointer to the input point cloud
+ * \param[in] max_dist maximum distance of a point to be considered as a neighbor
+ * \param[in] nr_threads number of threads to use (default = 1, only used if OpenMP flag
+ * is set) \return the mean point density of a given point cloud
+ */
+template <typename PointT>
+inline float
+getMeanPointDensity(const typename pcl::PointCloud<PointT>::ConstPtr& cloud,
+ float max_dist,
+ int nr_threads = 1);
+
+/** \brief Compute the mean point density of a given point cloud.
+ * \param[in] cloud pointer to the input point cloud
+ * \param[in] indices the vector of point indices to use from \a cloud
+ * \param[in] max_dist maximum distance of a point to be considered as a neighbor
+ * \param[in] nr_threads number of threads to use (default = 1, only used if OpenMP flag
+ * is set) \return the mean point density of a given point cloud
+ */
+template <typename PointT>
+inline float
+getMeanPointDensity(const typename pcl::PointCloud<PointT>::ConstPtr& cloud,
+ const pcl::Indices& indices,
+ float max_dist,
+ int nr_threads = 1);
+
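A minimal call sketch for the two overloads declared above; cloud is assumed to be a pcl::PointCloud<pcl::PointXYZ>::ConstPtr, and the 5 cm search radius, the subset indices, and the thread count are illustrative values only.

// Mean density over the whole cloud, neighbors searched within 5 cm, 4 OpenMP threads.
float density = pcl::getMeanPointDensity<pcl::PointXYZ>(cloud, 0.05f, 4);
// Same computation restricted to a subset of the points.
pcl::Indices subset = {0, 10, 20};
float subset_density =
    pcl::getMeanPointDensity<pcl::PointXYZ>(cloud, subset, 0.05f, 4);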
+namespace registration {
+/** \brief FPCSInitialAlignment computes corresponding four point congruent sets as
+ * described in: "4-points congruent sets for robust pairwise surface registration",
+ * Dror Aiger, Niloy Mitra, Daniel Cohen-Or. ACM Transactions on Graphics, vol. 27(3),
+ * 2008 \author P.W.Theiler \ingroup registration
+ */
+template <typename PointSource,
+ typename PointTarget,
+ typename NormalT = pcl::Normal,
+ typename Scalar = float>
+class FPCSInitialAlignment : public Registration<PointSource, PointTarget, Scalar> {
+public:
+ /** \cond */
+ using Ptr =
+ shared_ptr<FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>>;
+
+ using KdTreeReciprocal = pcl::search::KdTree<PointSource>;
+ using KdTreeReciprocalPtr = typename KdTreeReciprocal::Ptr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceIterator = typename PointCloudSource::iterator;
+
+ using Normals = pcl::PointCloud<NormalT>;
+ using NormalsConstPtr = typename Normals::ConstPtr;
+
+ using MatchingCandidate = pcl::registration::MatchingCandidate;
+ using MatchingCandidates = pcl::registration::MatchingCandidates;
+ /** \endcond */
+
+ /** \brief Constructor.
+ * Resets the maximum number of iterations to 0 thus forcing an internal computation
+ * if not set by the user. Sets the number of RANSAC iterations to 1000 and the
+ * standard transformation estimation to TransformationEstimation3Point.
+ */
+ FPCSInitialAlignment();
+
+ /** \brief Destructor. */
+ ~FPCSInitialAlignment(){};
+
+ /** \brief Provide a pointer to the vector of target indices.
+ * \param[in] target_indices a pointer to the target indices
+ */
+ inline void
+ setTargetIndices(const IndicesPtr& target_indices)
{
- /** \brief FPCSInitialAlignment computes corresponding four point congruent sets as described in:
- * "4-points congruent sets for robust pairwise surface registration", Dror Aiger, Niloy Mitra, Daniel Cohen-Or.
- * ACM Transactions on Graphics, vol. 27(3), 2008
- * \author P.W.Theiler
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename NormalT = pcl::Normal, typename Scalar = float>
- class FPCSInitialAlignment : public Registration <PointSource, PointTarget, Scalar>
- {
- public:
- /** \cond */
- using Ptr = shared_ptr <FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar> >;
- using ConstPtr = shared_ptr <const FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar> >;
-
- using KdTreeReciprocal = pcl::search::KdTree<PointSource>;
- using KdTreeReciprocalPtr = typename KdTreeReciprocal::Ptr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceIterator = typename PointCloudSource::iterator;
-
- using Normals = pcl::PointCloud<NormalT>;
- using NormalsConstPtr = typename Normals::ConstPtr;
-
- using MatchingCandidate = pcl::registration::MatchingCandidate;
- using MatchingCandidates = pcl::registration::MatchingCandidates;
- /** \endcond */
-
-
- /** \brief Constructor.
- * Resets the maximum number of iterations to 0 thus forcing an internal computation if not set by the user.
- * Sets the number of RANSAC iterations to 1000 and the standard transformation estimation to TransformationEstimation3Point.
- */
- FPCSInitialAlignment ();
-
- /** \brief Destructor. */
- ~FPCSInitialAlignment ()
- {};
-
-
- /** \brief Provide a pointer to the vector of target indices.
- * \param[in] target_indices a pointer to the target indices
- */
- inline void
- setTargetIndices (const IndicesPtr &target_indices)
- {
- target_indices_ = target_indices;
- };
-
- /** \return a pointer to the vector of target indices. */
- inline IndicesPtr
- getTargetIndices () const
- {
- return (target_indices_);
- };
-
-
- /** \brief Provide a pointer to the normals of the source point cloud.
- * \param[in] source_normals pointer to the normals of the source pointer cloud.
- */
- inline void
- setSourceNormals (const NormalsConstPtr &source_normals)
- {
- source_normals_ = source_normals;
- };
-
- /** \return the normals of the source point cloud. */
- inline NormalsConstPtr
- getSourceNormals () const
- {
- return (source_normals_);
- };
-
-
- /** \brief Provide a pointer to the normals of the target point cloud.
- * \param[in] target_normals point to the normals of the target point cloud.
- */
- inline void
- setTargetNormals (const NormalsConstPtr &target_normals)
- {
- target_normals_ = target_normals;
- };
-
- /** \return the normals of the target point cloud. */
- inline NormalsConstPtr
- getTargetNormals () const
- {
- return (target_normals_);
- };
-
-
- /** \brief Set the number of used threads if OpenMP is activated.
- * \param[in] nr_threads the number of used threads
- */
- inline void
- setNumberOfThreads (int nr_threads)
- {
- nr_threads_ = nr_threads;
- };
-
- /** \return the number of threads used if OpenMP is activated. */
- inline int
- getNumberOfThreads () const
- {
- return (nr_threads_);
- };
-
-
- /** \brief Set the constant factor delta which weights the internally calculated parameters.
- * \param[in] delta the weight factor delta
- * \param[in] normalize flag if delta should be normalized according to point cloud density
- */
- inline void
- setDelta (float delta, bool normalize = false)
- {
- delta_ = delta;
- normalize_delta_ = normalize;
- };
-
- /** \return the constant factor delta which weights the internally calculated parameters. */
- inline float
- getDelta () const
- {
- return (delta_);
- };
-
-
- /** \brief Set the approximate overlap between source and target.
- * \param[in] approx_overlap the estimated overlap
- */
- inline void
- setApproxOverlap (float approx_overlap)
- {
- approx_overlap_ = approx_overlap;
- };
-
- /** \return the approximated overlap between source and target. */
- inline float
- getApproxOverlap () const
- {
- return (approx_overlap_);
- };
-
-
- /** \brief Set the scoring threshold used for early finishing the method.
- * \param[in] score_threshold early terminating score criteria
- */
- inline void
- setScoreThreshold (float score_threshold)
- {
- score_threshold_ = score_threshold;
- };
-
- /** \return the scoring threshold used for early finishing the method. */
- inline float
- getScoreThreshold () const
- {
- return (score_threshold_);
- };
-
-
- /** \brief Set the number of source samples to use during alignment.
- * \param[in] nr_samples the number of source samples
- */
- inline void
- setNumberOfSamples (int nr_samples)
- {
- nr_samples_ = nr_samples;
- };
-
- /** \return the number of source samples to use during alignment. */
- inline int
- getNumberOfSamples () const
- {
- return (nr_samples_);
- };
-
-
- /** \brief Set the maximum normal difference between valid point correspondences in degree.
- * \param[in] max_norm_diff the maximum difference in degree
- */
- inline void
- setMaxNormalDifference (float max_norm_diff)
- {
- max_norm_diff_ = max_norm_diff;
- };
-
- /** \return the maximum normal difference between valid point correspondences in degree. */
- inline float
- getMaxNormalDifference () const
- {
- return (max_norm_diff_);
- };
-
-
- /** \brief Set the maximum computation time in seconds.
- * \param[in] max_runtime the maximum runtime of the method in seconds
- */
- inline void
- setMaxComputationTime (int max_runtime)
- {
- max_runtime_ = max_runtime;
- };
-
- /** \return the maximum computation time in seconds. */
- inline int
- getMaxComputationTime () const
- {
- return (max_runtime_);
- };
-
-
- /** \return the fitness score of the best scored four-point match. */
- inline float
- getFitnessScore () const
- {
- return (fitness_score_);
- };
-
- protected:
-
- using PCLBase <PointSource>::deinitCompute;
- using PCLBase <PointSource>::input_;
- using PCLBase <PointSource>::indices_;
-
- using Registration <PointSource, PointTarget, Scalar>::reg_name_;
- using Registration <PointSource, PointTarget, Scalar>::target_;
- using Registration <PointSource, PointTarget, Scalar>::tree_;
- using Registration <PointSource, PointTarget, Scalar>::correspondences_;
- using Registration <PointSource, PointTarget, Scalar>::target_cloud_updated_;
- using Registration <PointSource, PointTarget, Scalar>::final_transformation_;
- using Registration <PointSource, PointTarget, Scalar>::max_iterations_;
- using Registration <PointSource, PointTarget, Scalar>::ransac_iterations_;
- using Registration <PointSource, PointTarget, Scalar>::transformation_estimation_;
- using Registration <PointSource, PointTarget, Scalar>::converged_;
-
-
- /** \brief Rigid transformation computation method.
- * \param output the transformed input point cloud dataset using the rigid transformation found
- * \param guess The computed transforamtion
- */
- void
- computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess) override;
-
-
- /** \brief Internal computation initialization. */
- virtual bool
- initCompute ();
-
- /** \brief Select an approximately coplanar set of four points from the source cloud.
- * \param[out] base_indices selected source cloud indices, further used as base (B)
- * \param[out] ratio the two diagonal intersection ratios (r1,r2) of the base points
- * \return
- * * < 0 no coplanar four point sets with large enough sampling distance was found
- * * = 0 a set of four congruent points was selected
- */
- int
- selectBase (std::vector <int> &base_indices, float (&ratio)[2]);
-
- /** \brief Select randomly a triplet of points with large point-to-point distances. The minimum point
- * sampling distance is calculated based on the estimated point cloud overlap during initialization.
- *
- * \param[out] base_indices indices of base B
- * \return
- * * < 0 no triangle with large enough base lines could be selected
- * * = 0 base triangle succesully selected
- */
- int
- selectBaseTriangle (std::vector <int> &base_indices);
-
- /** \brief Setup the base (four coplanar points) by ordering the points and computing intersection
- * ratios and segment to segment distances of base diagonal.
- *
- * \param[in,out] base_indices indices of base B (will be reordered)
- * \param[out] ratio diagonal intersection ratios of base points
- */
- void
- setupBase (std::vector <int> &base_indices, float (&ratio)[2]);
-
- /** \brief Calculate intersection ratios and segment to segment distances of base diagonals.
- * \param[in] base_indices indices of base B
- * \param[out] ratio diagonal intersection ratios of base points
- * \return quality value of diagonal intersection
- */
- float
- segmentToSegmentDist (const std::vector <int> &base_indices, float (&ratio)[2]);
-
- /** \brief Search for corresponding point pairs given the distance between two base points.
- *
- * \param[in] idx1 first index of current base segment (in source cloud)
- * \param[in] idx2 second index of current base segment (in source cloud)
- * \param[out] pairs resulting point pairs with point-to-point distance close to ref_dist
- * \return
- * * < 0 no corresponding point pair was found
- * * = 0 at least one point pair candidate was found
- */
- virtual int
- bruteForceCorrespondences (int idx1, int idx2, pcl::Correspondences &pairs);
-
- /** \brief Determine base matches by combining the point pair candidate and search for coinciding
- * intersection points using the diagonal segment ratios of base B. The coincidation threshold is
- * calculated during initialization (coincidation_limit_).
- *
- * \param[in] base_indices indices of base B
- * \param[out] matches vector of candidate matches w.r.t the base B
- * \param[in] pairs_a point pairs corresponding to points of 1st diagonal of base B
- * \param[in] pairs_b point pairs corresponding to points of 2nd diagonal of base B
- * \param[in] ratio diagonal intersection ratios of base points
- * \return
- * * < 0 no base match could be found
- * * = 0 at least one base match was found
- */
- virtual int
- determineBaseMatches (
- const std::vector <int> &base_indices,
- std::vector <std::vector <int> > &matches,
- const pcl::Correspondences &pairs_a,
- const pcl::Correspondences &pairs_b,
- const float (&ratio)[2]);
-
- /** \brief Check if outer rectangle distance of matched points fit with the base rectangle.
- *
- * \param[in] match_indices indices of match M
- * \param[in] ds edge lengths of base B
- * \return
- * * < 0 at least one edge of the match M has no corresponding one in the base B
- * * = 0 edges of match M fits to the ones of base B
- */
- int
- checkBaseMatch (const std::vector <int> &match_indices, const float (&ds)[4]);
-
- /** \brief Method to handle current candidate matches. Here we validate and evaluate the matches w.r.t the
- * base and store the best fitting match (together with its score and estimated transformation).
- * \note For forwards compatibility the results are stored in 'vectors of size 1'.
- *
- * \param[in] base_indices indices of base B
- * \param[in,out] matches vector of candidate matches w.r.t the base B. The candidate matches are
- * reordered during this step.
- * \param[out] candidates vector which contains the candidates matches M
- */
- virtual void
- handleMatches (
- const std::vector <int> &base_indices,
- std::vector <std::vector <int> > &matches,
- MatchingCandidates &candidates);
-
- /** \brief Sets the correspondences between the base B and the match M by using the distance of each point
- * to the centroid of the rectangle.
- *
- * \param[in] base_indices indices of base B
- * \param[in] match_indices indices of match M
- * \param[out] correspondences resulting correspondences
- */
- virtual void
- linkMatchWithBase (
- const std::vector <int> &base_indices,
- std::vector <int> &match_indices,
- pcl::Correspondences &correspondences);
-
- /** \brief Validate the matching by computing the transformation between the source and target based on the
- * four matched points and by comparing the mean square error (MSE) to a threshold. The MSE limit was
- * calculated during initialization (max_mse_).
- *
- * \param[in] base_indices indices of base B
- * \param[in] match_indices indices of match M
- * \param[in] correspondences corresondences between source and target
- * \param[out] transformation resulting transformation matrix
- * \return
- * * < 0 MSE bigger than max_mse_
- * * = 0 MSE smaller than max_mse_
- */
- virtual int
- validateMatch (
- const std::vector <int> &base_indices,
- const std::vector <int> &match_indices,
- const pcl::Correspondences &correspondences,
- Eigen::Matrix4f &transformation);
-
- /** \brief Validate the transformation by calculating the number of inliers after transforming the source cloud.
- * The resulting fitness score is later used as the decision criteria of the best fitting match.
- *
- * \param[out] transformation updated orientation matrix using all inliers
- * \param[out] fitness_score current best fitness_score
- * \note fitness score is only updated if the score of the current transformation exceeds the input one.
- * \return
- * * < 0 if previous result is better than the current one (score remains)
- * * = 0 current result is better than the previous one (score updated)
- */
- virtual int
- validateTransformation (Eigen::Matrix4f &transformation, float &fitness_score);
-
- /** \brief Final computation of best match out of vector of best matches. To avoid cross thread dependencies
- * during parallel running, a best match for each try was calculated.
- * \note For forwards compatibility the candidates are stored in vectors of 'vectors of size 1'.
- * \param[in] candidates vector of candidate matches
- */
- virtual void
- finalCompute (const std::vector <MatchingCandidates > &candidates);
-
-
- /** \brief Normals of source point cloud. */
- NormalsConstPtr source_normals_;
-
- /** \brief Normals of target point cloud. */
- NormalsConstPtr target_normals_;
-
-
- /** \brief Number of threads for parallelization (standard = 1).
- * \note Only used if run compiled with OpenMP.
- */
- int nr_threads_;
-
- /** \brief Estimated overlap between source and target (standard = 0.5). */
- float approx_overlap_;
-
- /** \brief Delta value of 4pcs algorithm (standard = 1.0).
- * It can be used as:
- * * absolute value (normalization = false), value should represent the point accuracy to ensure finding neighbors between source <-> target
- * * relative value (normalization = true), to adjust the internally calculated point accuracy (= point density)
- */
- float delta_;
-
- /** \brief Score threshold to stop calculation with success.
- * If not set by the user it is equal to the approximated overlap
- */
- float score_threshold_;
-
- /** \brief The number of points to uniformly sample the source point cloud. (standard = 0 => full cloud). */
- int nr_samples_;
-
- /** \brief Maximum normal difference of corresponding point pairs in degrees (standard = 90). */
- float max_norm_diff_;
-
- /** \brief Maximum allowed computation time in seconds (standard = 0 => ~unlimited). */
- int max_runtime_;
-
-
- /** \brief Resulting fitness score of the best match. */
- float fitness_score_;
-
-
- /** \brief Estimated diamter of the target point cloud. */
- float diameter_;
-
- /** \brief Estimated squared metric overlap between source and target.
- * \note Internally calculated using the estimated overlap and the extent of the source cloud.
- * It is used to derive the minimum sampling distance of the base points as well as to calculated
- * the number of tries to reliably find a correct match.
- */
- float max_base_diameter_sqr_;
-
- /** \brief Use normals flag. */
- bool use_normals_;
-
- /** \brief Normalize delta flag. */
- bool normalize_delta_;
-
-
- /** \brief A pointer to the vector of source point indices to use after sampling. */
- pcl::IndicesPtr source_indices_;
-
- /** \brief A pointer to the vector of target point indices to use after sampling. */
- pcl::IndicesPtr target_indices_;
-
- /** \brief Maximal difference between corresponding point pairs in source and target.
- * \note Internally calculated using an estimation of the point density.
- */
- float max_pair_diff_;
-
- /** \brief Maximal difference between the length of the base edges and valid match edges.
- * \note Internally calculated using an estimation of the point density.
- */
- float max_edge_diff_;
+ target_indices_ = target_indices;
+ };
- /** \brief Maximal distance between coinciding intersection points to find valid matches.
- * \note Internally calculated using an estimation of the point density.
- */
- float coincidation_limit_;
+ /** \return a pointer to the vector of target indices. */
+ inline IndicesPtr
+ getTargetIndices() const
+ {
+ return (target_indices_);
+ };
+
+ /** \brief Provide a pointer to the normals of the source point cloud.
+ * \param[in] source_normals pointer to the normals of the source point cloud.
+ */
+ inline void
+ setSourceNormals(const NormalsConstPtr& source_normals)
+ {
+ source_normals_ = source_normals;
+ };
- /** \brief Maximal mean squared errors of a transformation calculated from a candidate match.
- * \note Internally calculated using an estimation of the point density.
- */
- float max_mse_;
+ /** \return the normals of the source point cloud. */
+ inline NormalsConstPtr
+ getSourceNormals() const
+ {
+ return (source_normals_);
+ };
+
+ /** \brief Provide a pointer to the normals of the target point cloud.
+ * \param[in] target_normals pointer to the normals of the target point cloud.
+ */
+ inline void
+ setTargetNormals(const NormalsConstPtr& target_normals)
+ {
+ target_normals_ = target_normals;
+ };
- /** \brief Maximal squared point distance between source and target points to count as inlier.
- * \note Internally calculated using an estimation of the point density.
- */
- float max_inlier_dist_sqr_;
+ /** \return the normals of the target point cloud. */
+ inline NormalsConstPtr
+ getTargetNormals() const
+ {
+ return (target_normals_);
+ };
+
+ /** \brief Set the number of used threads if OpenMP is activated.
+ * \param[in] nr_threads the number of used threads
+ */
+ inline void
+ setNumberOfThreads(int nr_threads)
+ {
+ nr_threads_ = nr_threads;
+ };
+ /** \return the number of threads used if OpenMP is activated. */
+ inline int
+ getNumberOfThreads() const
+ {
+ return (nr_threads_);
+ };
+
+ /** \brief Set the constant factor delta which weights the internally calculated
+ * parameters. \param[in] delta the weight factor delta \param[in] normalize flag if
+ * delta should be normalized according to point cloud density
+ */
+ inline void
+ setDelta(float delta, bool normalize = false)
+ {
+ delta_ = delta;
+ normalize_delta_ = normalize;
+ };
+
+ /** \return the constant factor delta which weights the internally calculated
+ * parameters. */
+ inline float
+ getDelta() const
+ {
+ return (delta_);
+ };
+
+ /** \brief Set the approximate overlap between source and target.
+ * \param[in] approx_overlap the estimated overlap
+ */
+ inline void
+ setApproxOverlap(float approx_overlap)
+ {
+ approx_overlap_ = approx_overlap;
+ };
- /** \brief Definition of a small error. */
- const float small_error_;
+ /** \return the approximated overlap between source and target. */
+ inline float
+ getApproxOverlap() const
+ {
+ return (approx_overlap_);
+ };
+
+ /** \brief Set the scoring threshold used for early finishing the method.
+ * \param[in] score_threshold early terminating score criteria
+ */
+ inline void
+ setScoreThreshold(float score_threshold)
+ {
+ score_threshold_ = score_threshold;
+ };
+
+ /** \return the scoring threshold used for early finishing the method. */
+ inline float
+ getScoreThreshold() const
+ {
+ return (score_threshold_);
+ };
+
+ /** \brief Set the number of source samples to use during alignment.
+ * \param[in] nr_samples the number of source samples
+ */
+ inline void
+ setNumberOfSamples(int nr_samples)
+ {
+ nr_samples_ = nr_samples;
+ };
+
+ /** \return the number of source samples to use during alignment. */
+ inline int
+ getNumberOfSamples() const
+ {
+ return (nr_samples_);
+ };
+
+ /** \brief Set the maximum normal difference between valid point correspondences in
+ * degree. \param[in] max_norm_diff the maximum difference in degree
+ */
+ inline void
+ setMaxNormalDifference(float max_norm_diff)
+ {
+ max_norm_diff_ = max_norm_diff;
+ };
+
+ /** \return the maximum normal difference between valid point correspondences in
+ * degree. */
+ inline float
+ getMaxNormalDifference() const
+ {
+ return (max_norm_diff_);
+ };
+
+ /** \brief Set the maximum computation time in seconds.
+ * \param[in] max_runtime the maximum runtime of the method in seconds
+ */
+ inline void
+ setMaxComputationTime(int max_runtime)
+ {
+ max_runtime_ = max_runtime;
+ };
- };
- }; // namespace registration
-}; // namespace pcl
+ /** \return the maximum computation time in seconds. */
+ inline int
+ getMaxComputationTime() const
+ {
+ return (max_runtime_);
+ };
+
+ /** \return the fitness score of the best scored four-point match. */
+ inline float
+ getFitnessScore() const
+ {
+ return (fitness_score_);
+ };
+
+protected:
+ using PCLBase<PointSource>::deinitCompute;
+ using PCLBase<PointSource>::input_;
+ using PCLBase<PointSource>::indices_;
+
+ using Registration<PointSource, PointTarget, Scalar>::reg_name_;
+ using Registration<PointSource, PointTarget, Scalar>::target_;
+ using Registration<PointSource, PointTarget, Scalar>::tree_;
+ using Registration<PointSource, PointTarget, Scalar>::correspondences_;
+ using Registration<PointSource, PointTarget, Scalar>::target_cloud_updated_;
+ using Registration<PointSource, PointTarget, Scalar>::final_transformation_;
+ using Registration<PointSource, PointTarget, Scalar>::max_iterations_;
+ using Registration<PointSource, PointTarget, Scalar>::ransac_iterations_;
+ using Registration<PointSource, PointTarget, Scalar>::transformation_estimation_;
+ using Registration<PointSource, PointTarget, Scalar>::converged_;
+
+ /** \brief Rigid transformation computation method.
+ * \param output the transformed input point cloud dataset using the rigid
+ * transformation found \param guess The computed transformation
+ */
+ void
+ computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess) override;
+
+ /** \brief Internal computation initialization. */
+ virtual bool
+ initCompute();
+
+ /** \brief Select an approximately coplanar set of four points from the source cloud.
+ * \param[out] base_indices selected source cloud indices, further used as base (B)
+ * \param[out] ratio the two diagonal intersection ratios (r1,r2) of the base points
+ * \return
+ * * < 0 no coplanar four point sets with large enough sampling distance was found
+ * * = 0 a set of four congruent points was selected
+ */
+ int
+ selectBase(pcl::Indices& base_indices, float (&ratio)[2]);
+
+ /** \brief Select randomly a triplet of points with large point-to-point distances.
+ * The minimum point sampling distance is calculated based on the estimated point
+ * cloud overlap during initialization.
+ *
+ * \param[out] base_indices indices of base B
+ * \return
+ * * < 0 no triangle with large enough base lines could be selected
+ * * = 0 base triangle successfully selected
+ */
+ int
+ selectBaseTriangle(pcl::Indices& base_indices);
+
+ /** \brief Setup the base (four coplanar points) by ordering the points and computing
+ * intersection ratios and segment to segment distances of base diagonal.
+ *
+ * \param[in,out] base_indices indices of base B (will be reordered)
+ * \param[out] ratio diagonal intersection ratios of base points
+ */
+ void
+ setupBase(pcl::Indices& base_indices, float (&ratio)[2]);
+
+ /** \brief Calculate intersection ratios and segment to segment distances of base
+ * diagonals. \param[in] base_indices indices of base B \param[out] ratio diagonal
+ * intersection ratios of base points \return quality value of diagonal intersection
+ */
+ float
+ segmentToSegmentDist(const pcl::Indices& base_indices, float (&ratio)[2]);
+
+ /** \brief Search for corresponding point pairs given the distance between two base
+ * points.
+ *
+ * \param[in] idx1 first index of current base segment (in source cloud)
+ * \param[in] idx2 second index of current base segment (in source cloud)
+ * \param[out] pairs resulting point pairs with point-to-point distance close to
+ * ref_dist \return
+ * * < 0 no corresponding point pair was found
+ * * = 0 at least one point pair candidate was found
+ */
+ virtual int
+ bruteForceCorrespondences(int idx1, int idx2, pcl::Correspondences& pairs);
+
+ /** \brief Determine base matches by combining the point pair candidate and search for
+ * coinciding intersection points using the diagonal segment ratios of base B. The
+ * coincidation threshold is calculated during initialization (coincidation_limit_).
+ *
+ * \param[in] base_indices indices of base B
+ * \param[out] matches vector of candidate matches w.r.t the base B
+ * \param[in] pairs_a point pairs corresponding to points of 1st diagonal of base B
+ * \param[in] pairs_b point pairs corresponding to points of 2nd diagonal of base B
+ * \param[in] ratio diagonal intersection ratios of base points
+ * \return
+ * * < 0 no base match could be found
+ * * = 0 at least one base match was found
+ */
+ virtual int
+ determineBaseMatches(const pcl::Indices& base_indices,
+ std::vector<pcl::Indices>& matches,
+ const pcl::Correspondences& pairs_a,
+ const pcl::Correspondences& pairs_b,
+ const float (&ratio)[2]);
+
+ /** \brief Check if outer rectangle distance of matched points fit with the base
+ * rectangle.
+ *
+ * \param[in] match_indices indices of match M
+ * \param[in] ds edge lengths of base B
+ * \return
+ * * < 0 at least one edge of the match M has no corresponding one in the base B
+ * * = 0 edges of match M fits to the ones of base B
+ */
+ int
+ checkBaseMatch(const pcl::Indices& match_indices, const float (&ds)[4]);
+
+ /** \brief Method to handle current candidate matches. Here we validate and evaluate
+ * the matches w.r.t the base and store the best fitting match (together with its
+ * score and estimated transformation). \note For forwards compatibility the results
+ * are stored in 'vectors of size 1'.
+ *
+ * \param[in] base_indices indices of base B
+ * \param[in,out] matches vector of candidate matches w.r.t the base B. The candidate
+ * matches are reordered during this step. \param[out] candidates vector which
+ * contains the candidate matches M
+ */
+ virtual void
+ handleMatches(const pcl::Indices& base_indices,
+ std::vector<pcl::Indices>& matches,
+ MatchingCandidates& candidates);
+
+ /** \brief Sets the correspondences between the base B and the match M by using the
+ * distance of each point to the centroid of the rectangle.
+ *
+ * \param[in] base_indices indices of base B
+ * \param[in] match_indices indices of match M
+ * \param[out] correspondences resulting correspondences
+ */
+ virtual void
+ linkMatchWithBase(const pcl::Indices& base_indices,
+ pcl::Indices& match_indices,
+ pcl::Correspondences& correspondences);
+
+ /** \brief Validate the matching by computing the transformation between the source
+ * and target based on the four matched points and by comparing the mean square error
+ * (MSE) to a threshold. The MSE limit was calculated during initialization
+ * (max_mse_).
+ *
+ * \param[in] base_indices indices of base B
+ * \param[in] match_indices indices of match M
+ * \param[in] correspondences correspondences between source and target
+ * \param[out] transformation resulting transformation matrix
+ * \return
+ * * < 0 MSE bigger than max_mse_
+ * * = 0 MSE smaller than max_mse_
+ */
+ virtual int
+ validateMatch(const pcl::Indices& base_indices,
+ const pcl::Indices& match_indices,
+ const pcl::Correspondences& correspondences,
+ Eigen::Matrix4f& transformation);
+
+ /** \brief Validate the transformation by calculating the number of inliers after
+ * transforming the source cloud. The resulting fitness score is later used as the
+ * decision criteria of the best fitting match.
+ *
+ * \param[out] transformation updated orientation matrix using all inliers
+ * \param[out] fitness_score current best fitness_score
+ * \note fitness score is only updated if the score of the current transformation
+ * exceeds the input one. \return
+ * * < 0 if previous result is better than the current one (score remains)
+ * * = 0 current result is better than the previous one (score updated)
+ */
+ virtual int
+ validateTransformation(Eigen::Matrix4f& transformation, float& fitness_score);
+
+ /** \brief Final computation of best match out of vector of best matches. To avoid
+ * cross thread dependencies during parallel running, a best match for each try was
+ * calculated. \note For forwards compatibility the candidates are stored in vectors
+ * of 'vectors of size 1'. \param[in] candidates vector of candidate matches
+ */
+ virtual void
+ finalCompute(const std::vector<MatchingCandidates>& candidates);
+
+ /** \brief Normals of source point cloud. */
+ NormalsConstPtr source_normals_;
+
+ /** \brief Normals of target point cloud. */
+ NormalsConstPtr target_normals_;
+
+ /** \brief Number of threads for parallelization (standard = 1).
+ * \note Only used if run compiled with OpenMP.
+ */
+ int nr_threads_;
+
+ /** \brief Estimated overlap between source and target (standard = 0.5). */
+ float approx_overlap_;
+
+ /** \brief Delta value of 4pcs algorithm (standard = 1.0).
+ * It can be used as:
+ * * absolute value (normalization = false), value should represent the point accuracy
+ * to ensure finding neighbors between source <-> target
+ * * relative value (normalization = true), to adjust the internally calculated point
+ * accuracy (= point density)
+ */
+ float delta_;
+
+ /** \brief Score threshold to stop calculation with success.
+ * If not set by the user it depends on the size of the approximated overlap
+ */
+ float score_threshold_;
+
+ /** \brief The number of points to uniformly sample the source point cloud. (standard
+ * = 0 => full cloud). */
+ int nr_samples_;
+
+ /** \brief Maximum normal difference of corresponding point pairs in degrees (standard
+ * = 90). */
+ float max_norm_diff_;
+
+ /** \brief Maximum allowed computation time in seconds (standard = 0 => ~unlimited).
+ */
+ int max_runtime_;
+
+ /** \brief Resulting fitness score of the best match. */
+ float fitness_score_;
+
+ /** \brief Estimated diameter of the target point cloud. */
+ float diameter_;
+
+ /** \brief Estimated squared metric overlap between source and target.
+ * \note Internally calculated using the estimated overlap and the extent of the
+ * source cloud. It is used to derive the minimum sampling distance of the base points
+ * as well as to calculate the number of tries to reliably find a correct match.
+ */
+ float max_base_diameter_sqr_;
+
+ /** \brief Use normals flag. */
+ bool use_normals_;
+
+ /** \brief Normalize delta flag. */
+ bool normalize_delta_;
+
+ /** \brief A pointer to the vector of source point indices to use after sampling. */
+ pcl::IndicesPtr source_indices_;
+
+ /** \brief A pointer to the vector of target point indices to use after sampling. */
+ pcl::IndicesPtr target_indices_;
+
+ /** \brief Maximal difference between corresponding point pairs in source and target.
+ * \note Internally calculated using an estimation of the point density.
+ */
+ float max_pair_diff_;
+
+ /** \brief Maximal difference between the length of the base edges and valid match
+ * edges. \note Internally calculated using an estimation of the point density.
+ */
+ float max_edge_diff_;
+
+ /** \brief Maximal distance between coinciding intersection points to find valid
+ * matches. \note Internally calculated using an estimation of the point density.
+ */
+ float coincidation_limit_;
+
+ /** \brief Maximal mean squared errors of a transformation calculated from a candidate
+ * match. \note Internally calculated using an estimation of the point density.
+ */
+ float max_mse_;
+
+ /** \brief Maximal squared point distance between source and target points to count as
+ * inlier. \note Internally calculated using an estimation of the point density.
+ */
+ float max_inlier_dist_sqr_;
+
+ /** \brief Definition of a small error. */
+ const float small_error_;
+};
+}; // namespace registration
+}; // namespace pcl
#include <pcl/registration/impl/ia_fpcs.hpp>
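Hedged end-to-end sketch of the FPCSInitialAlignment API declared above. src and tgt are assumed to be pcl::PointCloud<pcl::PointXYZ>::Ptr clouds prepared elsewhere, and every parameter value below is purely illustrative; the setters and the align()/getFinalTransformation() calls come from this header and its Registration base class.

pcl::registration::FPCSInitialAlignment<pcl::PointXYZ, pcl::PointXYZ> fpcs;
fpcs.setInputSource(src);
fpcs.setInputTarget(tgt);
fpcs.setApproxOverlap(0.7f);   // rough estimate of the source/target overlap
fpcs.setDelta(0.01f);          // absolute accuracy term (normalize = false)
fpcs.setNumberOfSamples(200);  // 0 would use the full cloud
pcl::PointCloud<pcl::PointXYZ> source_aligned;
fpcs.align(source_aligned);
if (fpcs.hasConverged()) {
  const Eigen::Matrix4f transform = fpcs.getFinalTransformation();
  // use transform, e.g. as an initial guess for a fine registration step
}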
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#include <pcl/registration/ia_fpcs.h>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** \brief KFPCSInitialAlignment computes corresponding four point congruent sets based
+ * on keypoints as described in: "Markerless point cloud registration with
+ * keypoint-based 4-points congruent sets", Pascal Theiler, Jan Dirk Wegner, Konrad
+ * Schindler. ISPRS Annals II-5/W2, 2013. Presented at ISPRS Workshop Laser Scanning,
+ * Antalya, Turkey, 2013. \note Method has since been improved and some variations to
+ * the paper exist. \author P.W.Theiler \ingroup registration
+ */
+template <typename PointSource,
+ typename PointTarget,
+ typename NormalT = pcl::Normal,
+ typename Scalar = float>
+class KFPCSInitialAlignment
+: public virtual FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar> {
+public:
+ /** \cond */
+ using Ptr =
+ shared_ptr<KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>>;
+ using ConstPtr = shared_ptr<
+ const KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>>;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceIterator = typename PointCloudSource::iterator;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetIterator = typename PointCloudTarget::iterator;
+
+ using MatchingCandidate = pcl::registration::MatchingCandidate;
+ using MatchingCandidates = pcl::registration::MatchingCandidates;
+ /** \endcond */
+
+ /** \brief Constructor. */
+ KFPCSInitialAlignment();
+
+ /** \brief Destructor. */
+ ~KFPCSInitialAlignment(){};
+
+ /** \brief Set the upper translation threshold used for score evaluation.
+ * \param[in] upper_trl_boundary upper translation threshold
+ */
+ inline void
+ setUpperTranslationThreshold(float upper_trl_boundary)
{
- /** \brief KFPCSInitialAlignment computes corresponding four point congruent sets based on keypoints
- * as described in: "Markerless point cloud registration with keypoint-based 4-points congruent sets",
- * Pascal Theiler, Jan Dirk Wegner, Konrad Schindler. ISPRS Annals II-5/W2, 2013. Presented at ISPRS Workshop
- * Laser Scanning, Antalya, Turkey, 2013.
- * \note Method has since been improved and some variations to the paper exist.
- * \author P.W.Theiler
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename NormalT = pcl::Normal, typename Scalar = float>
- class KFPCSInitialAlignment : public virtual FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>
- {
- public:
- /** \cond */
- using Ptr = shared_ptr <KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar> >;
- using ConstPtr = shared_ptr <const KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar> >;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceIterator = typename PointCloudSource::iterator;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetIterator = typename PointCloudTarget::iterator;
-
- using MatchingCandidate = pcl::registration::MatchingCandidate;
- using MatchingCandidates = pcl::registration::MatchingCandidates;
- /** \endcond */
-
-
- /** \brief Constructor. */
- KFPCSInitialAlignment ();
-
- /** \brief Destructor. */
- ~KFPCSInitialAlignment ()
- {};
-
-
- /** \brief Set the upper translation threshold used for score evaluation.
- * \param[in] upper_trl_boundary upper translation threshold
- */
- inline void
- setUpperTranslationThreshold (float upper_trl_boundary)
- {
- upper_trl_boundary_ = upper_trl_boundary;
- };
-
- /** \return the upper translation threshold used for score evaluation. */
- inline float
- getUpperTranslationThreshold () const
- {
- return (upper_trl_boundary_);
- };
-
-
- /** \brief Set the lower translation threshold used for score evaluation.
- * \param[in] lower_trl_boundary lower translation threshold
- */
- inline void
- setLowerTranslationThreshold (float lower_trl_boundary)
- {
- lower_trl_boundary_ = lower_trl_boundary;
- };
-
- /** \return the lower translation threshold used for score evaluation. */
- inline float
- getLowerTranslationThreshold () const
- {
- return (lower_trl_boundary_);
- };
-
-
- /** \brief Set the weighting factor of the translation cost term.
- * \param[in] lambda the weighting factor of the translation cost term
- */
- inline void
- setLambda (float lambda)
- {
- lambda_ = lambda;
- };
-
- /** \return the weighting factor of the translation cost term. */
- inline float
- getLambda () const
- {
- return (lambda_);
- };
-
-
- /** \brief Get the N best unique candidate matches according to their fitness score.
- * The method only returns unique transformations comparing the translation
- * and the 3D rotation to already returned transformations.
- *
- * \note The method may return less than N candidates, if the number of unique candidates
- * is smaller than N
- *
- * \param[in] n number of best candidates to return
- * \param[in] min_angle3d minimum 3D angle difference in radian
- * \param[in] min_translation3d minimum 3D translation difference
- * \param[out] candidates vector of unique candidates
- */
- void
- getNBestCandidates (int n, float min_angle3d, float min_translation3d, MatchingCandidates &candidates);
-
- /** \brief Get all unique candidate matches with fitness scores above a threshold t.
- * The method only returns unique transformations comparing the translation
- * and the 3D rotation to already returned transformations.
- *
- * \param[in] t fitness score threshold
- * \param[in] min_angle3d minimum 3D angle difference in radian
- * \param[in] min_translation3d minimum 3D translation difference
- * \param[out] candidates vector of unique candidates
- */
- void
- getTBestCandidates (float t, float min_angle3d, float min_translation3d, MatchingCandidates &candidates);
-
+ upper_trl_boundary_ = upper_trl_boundary;
+ };
- protected:
-
- using PCLBase <PointSource>::deinitCompute;
- using PCLBase <PointSource>::input_;
- using PCLBase <PointSource>::indices_;
-
- using Registration <PointSource, PointTarget, Scalar>::reg_name_;
- using Registration <PointSource, PointTarget, Scalar>::tree_;
- using Registration <PointSource, PointTarget, Scalar>::final_transformation_;
- using Registration <PointSource, PointTarget, Scalar>::ransac_iterations_;
- using Registration <PointSource, PointTarget, Scalar>::correspondences_;
- using Registration <PointSource, PointTarget, Scalar>::converged_;
-
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::delta_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::approx_overlap_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::max_pair_diff_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::max_edge_diff_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::coincidation_limit_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::max_mse_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::max_inlier_dist_sqr_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::diameter_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::normalize_delta_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::fitness_score_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::score_threshold_;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::linkMatchWithBase;
- using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::validateMatch;
-
-
- /** \brief Internal computation initialization. */
- bool
- initCompute () override;
-
- /** \brief Method to handle current candidate matches. Here we validate and evaluate the matches w.r.t the
- * base and store the sorted matches (together with score values and estimated transformations).
- *
- * \param[in] base_indices indices of base B
- * \param[in,out] matches vector of candidate matches w.r.t the base B. The candidate matches are
- * reordered during this step.
- * \param[out] candidates vector which contains the candidates matches M
- */
- void
- handleMatches (
- const std::vector <int> &base_indices,
- std::vector <std::vector <int> > &matches,
- MatchingCandidates &candidates) override;
-
- /** \brief Validate the transformation by calculating the score value after transforming the input source cloud.
- * The resulting score is later used as the decision criteria of the best fitting match.
- *
- * \param[out] transformation updated orientation matrix using all inliers
- * \param[out] fitness_score current best score
- * \note fitness score is only updated if the score of the current transformation exceeds the input one.
- * \return
- * * < 0 if previous result is better than the current one (score remains)
- * * = 0 current result is better than the previous one (score updated)
- */
- int
- validateTransformation (Eigen::Matrix4f &transformation, float &fitness_score) override;
-
- /** \brief Final computation of best match out of vector of matches. To avoid cross thread dependencies
- * during parallel running, a best match for each try was calculated.
- * \note For forwards compatibility the candidates are stored in vectors of 'vectors of size 1'.
- * \param[in] candidates vector of candidate matches
- */
- void
- finalCompute (const std::vector <MatchingCandidates > &candidates) override;
-
-
- /** \brief Lower boundary for translation costs calculation.
- * \note If not set by the user, the translation costs are not used during evaluation.
- */
- float lower_trl_boundary_;
-
- /** \brief Upper boundary for translation costs calculation.
- * \note If not set by the user, it is calculated from the estimated overlap and the diameter
- * of the point cloud.
- */
- float upper_trl_boundary_;
-
- /** \brief Weighting factor for translation costs (standard = 0.5). */
- float lambda_;
-
-
- /** \brief Container for resulting vector of registration candidates. */
- MatchingCandidates candidates_;
-
- /** \brief Flag if translation score should be used in validation (internal calculation). */
- bool use_trl_score_;
+ /** \return the upper translation threshold used for score evaluation. */
+ inline float
+ getUpperTranslationThreshold() const
+ {
+ return (upper_trl_boundary_);
+ };
+
+ /** \brief Set the lower translation threshold used for score evaluation.
+ * \param[in] lower_trl_boundary lower translation threshold
+ */
+ inline void
+ setLowerTranslationThreshold(float lower_trl_boundary)
+ {
+ lower_trl_boundary_ = lower_trl_boundary;
+ };
- /** \brief Subset of input indices on which we evaluate candidates.
- * To speed up the evaluation, we only use a fix number of indices defined during initialization.
- */
- pcl::IndicesPtr indices_validation_;
+ /** \return the lower translation threshold used for score evaluation. */
+ inline float
+ getLowerTranslationThreshold() const
+ {
+ return (lower_trl_boundary_);
+ };
+
+ /** \brief Set the weighting factor of the translation cost term.
+ * \param[in] lambda the weighting factor of the translation cost term
+ */
+ inline void
+ setLambda(float lambda)
+ {
+ lambda_ = lambda;
+ };
- };
- }; // namespace registration
-}; // namespace pcl
+ /** \return the weighting factor of the translation cost term. */
+ inline float
+ getLambda() const
+ {
+ return (lambda_);
+ };
+
+ /** \brief Get the N best unique candidate matches according to their fitness score.
+ * The method only returns unique transformations comparing the translation
+ * and the 3D rotation to already returned transformations.
+ *
+ * \note The method may return less than N candidates, if the number of unique
+ * candidates is smaller than N
+ *
+ * \param[in] n number of best candidates to return
+ * \param[in] min_angle3d minimum 3D angle difference in radian
+ * \param[in] min_translation3d minimum 3D translation difference
+ * \param[out] candidates vector of unique candidates
+ */
+ void
+ getNBestCandidates(int n,
+ float min_angle3d,
+ float min_translation3d,
+ MatchingCandidates& candidates);
+
+ /** \brief Get all unique candidate matches with fitness scores above a threshold t.
+ * The method only returns unique transformations comparing the translation
+ * and the 3D rotation to already returned transformations.
+ *
+ * \param[in] t fitness score threshold
+ * \param[in] min_angle3d minimum 3D angle difference in radian
+ * \param[in] min_translation3d minimum 3D translation difference
+ * \param[out] candidates vector of unique candidates
+ */
+ void
+ getTBestCandidates(float t,
+ float min_angle3d,
+ float min_translation3d,
+ MatchingCandidates& candidates);
+
+protected:
+ using PCLBase<PointSource>::deinitCompute;
+ using PCLBase<PointSource>::input_;
+ using PCLBase<PointSource>::indices_;
+
+ using Registration<PointSource, PointTarget, Scalar>::reg_name_;
+ using Registration<PointSource, PointTarget, Scalar>::tree_;
+ using Registration<PointSource, PointTarget, Scalar>::final_transformation_;
+ using Registration<PointSource, PointTarget, Scalar>::ransac_iterations_;
+ using Registration<PointSource, PointTarget, Scalar>::correspondences_;
+ using Registration<PointSource, PointTarget, Scalar>::converged_;
+
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::delta_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ approx_overlap_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::max_pair_diff_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::max_edge_diff_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ coincidation_limit_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::max_mse_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ max_inlier_dist_sqr_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::diameter_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ normalize_delta_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::fitness_score_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ score_threshold_;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ linkMatchWithBase;
+ using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::validateMatch;
+
+ /** \brief Internal computation initialization. */
+ bool
+ initCompute() override;
+
+ /** \brief Method to handle current candidate matches. Here we validate and evaluate
+ * the matches w.r.t the base and store the sorted matches (together with score values
+ * and estimated transformations).
+ *
+ * \param[in] base_indices indices of base B
+ * \param[in,out] matches vector of candidate matches w.r.t the base B. The candidate
+ * matches are reordered during this step. \param[out] candidates vector which
+ * contains the candidate matches M
+ */
+ void
+ handleMatches(const pcl::Indices& base_indices,
+ std::vector<pcl::Indices>& matches,
+ MatchingCandidates& candidates) override;
+
+ /** \brief Validate the transformation by calculating the score value after
+ * transforming the input source cloud. The resulting score is later used as the
+ * decision criteria of the best fitting match.
+ *
+ * \param[out] transformation updated orientation matrix using all inliers
+ * \param[out] fitness_score current best score
+ * \note fitness score is only updated if the score of the current transformation
+ * exceeds the input one. \return
+ * * < 0 if previous result is better than the current one (score remains)
+ * * = 0 current result is better than the previous one (score updated)
+ */
+ int
+ validateTransformation(Eigen::Matrix4f& transformation,
+ float& fitness_score) override;
+
+ /** \brief Final computation of best match out of vector of matches. To avoid cross
+ * thread dependencies during parallel running, a best match for each try was
+ * calculated. \note For forwards compatibility the candidates are stored in vectors
+ * of 'vectors of size 1'. \param[in] candidates vector of candidate matches
+ */
+ void
+ finalCompute(const std::vector<MatchingCandidates>& candidates) override;
+
+ /** \brief Lower boundary for translation costs calculation.
+ * \note If not set by the user, the translation costs are not used during evaluation.
+ */
+ float lower_trl_boundary_;
+
+ /** \brief Upper boundary for translation costs calculation.
+ * \note If not set by the user, it is calculated from the estimated overlap and the
+ * diameter of the point cloud.
+ */
+ float upper_trl_boundary_;
+
+ /** \brief Weighting factor for translation costs (standard = 0.5). */
+ float lambda_;
+
+ /** \brief Container for resulting vector of registration candidates. */
+ MatchingCandidates candidates_;
+
+ /** \brief Flag if translation score should be used in validation (internal
+ * calculation). */
+ bool use_trl_score_;
+
+ /** \brief Subset of input indices on which we evaluate candidates.
+ * To speed up the evaluation, we only use a fixed number of indices defined during
+ * initialization.
+ */
+ pcl::IndicesPtr indices_validation_;
+};
+}; // namespace registration
+}; // namespace pcl
#include <pcl/registration/impl/ia_kfpcs.hpp>
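The KFPCS header above adds translation-cost weighting (lambda_ and the lower/upper translation boundaries) on top of the FPCS machinery, but the typical entry points come from the FPCS base class and Registration. A minimal usage sketch, assuming setters such as setApproxOverlap, setDelta and setScoreThreshold keep their current names and signatures; the numeric values are illustrative placeholders, not recommendations:

#include <pcl/point_types.h>
#include <pcl/registration/ia_kfpcs.h>

// Minimal sketch: keypoint-based four-point congruent set (KFPCS) initial alignment.
// The input clouds are assumed to be prepared elsewhere.
Eigen::Matrix4f
alignWithKfpcs (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target)
{
  pcl::registration::KFPCSInitialAlignment<pcl::PointXYZ, pcl::PointXYZ> kfpcs;
  kfpcs.setInputSource (source);
  kfpcs.setInputTarget (target);
  kfpcs.setApproxOverlap (0.6f);   // assumed overlap between the two clouds
  kfpcs.setDelta (0.02f, false);   // assumed point accuracy (not normalized)
  kfpcs.setScoreThreshold (0.3f);  // early-out threshold for candidate validation

  pcl::PointCloud<pcl::PointXYZ> aligned;
  kfpcs.align (aligned);
  return kfpcs.getFinalTransformation ();
}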
#pragma once
-#include <pcl/memory.h>
#include <pcl/registration/registration.h>
#include <pcl/registration/transformation_estimation_svd.h>
+#include <pcl/memory.h>
+
+namespace pcl {
+/** \brief @b SampleConsensusInitialAlignment is an implementation of the initial
+ * alignment algorithm described in section IV of "Fast Point Feature Histograms (FPFH)
+ * for 3D Registration," Rusu et al. \author Michael Dixon, Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename FeatureT>
+class SampleConsensusInitialAlignment : public Registration<PointSource, PointTarget> {
+public:
+ using Registration<PointSource, PointTarget>::reg_name_;
+ using Registration<PointSource, PointTarget>::input_;
+ using Registration<PointSource, PointTarget>::indices_;
+ using Registration<PointSource, PointTarget>::target_;
+ using Registration<PointSource, PointTarget>::final_transformation_;
+ using Registration<PointSource, PointTarget>::transformation_;
+ using Registration<PointSource, PointTarget>::corr_dist_threshold_;
+ using Registration<PointSource, PointTarget>::min_number_correspondences_;
+ using Registration<PointSource, PointTarget>::max_iterations_;
+ using Registration<PointSource, PointTarget>::tree_;
+ using Registration<PointSource, PointTarget>::transformation_estimation_;
+ using Registration<PointSource, PointTarget>::converged_;
+ using Registration<PointSource, PointTarget>::getClassName;
+
+ using PointCloudSource =
+ typename Registration<PointSource, PointTarget>::PointCloudSource;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget =
+ typename Registration<PointSource, PointTarget>::PointCloudTarget;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+ using FeatureCloud = pcl::PointCloud<FeatureT>;
+ using FeatureCloudPtr = typename FeatureCloud::Ptr;
+ using FeatureCloudConstPtr = typename FeatureCloud::ConstPtr;
+
+ using Ptr =
+ shared_ptr<SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>>;
+ using ConstPtr = shared_ptr<
+ const SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>>;
+
+ class ErrorFunctor {
+ public:
+ using Ptr = shared_ptr<ErrorFunctor>;
+ using ConstPtr = shared_ptr<const ErrorFunctor>;
+
+ virtual ~ErrorFunctor() = default;
+ virtual float
+ operator()(float d) const = 0;
+ };
+
+ class HuberPenalty : public ErrorFunctor {
+ private:
+ HuberPenalty() {}
+
+ public:
+ HuberPenalty(float threshold) : threshold_(threshold) {}
+ virtual float
+ operator()(float e) const
+ {
+ if (e <= threshold_)
+ return (0.5 * e * e);
+ return (0.5 * threshold_ * (2.0 * std::fabs(e) - threshold_));
+ }
+
+ protected:
+ float threshold_;
+ };
+
+ class TruncatedError : public ErrorFunctor {
+ private:
+ TruncatedError() {}
+
+ public:
+ ~TruncatedError() {}
+
+ TruncatedError(float threshold) : threshold_(threshold) {}
+ float
+ operator()(float e) const override
+ {
+ if (e <= threshold_)
+ return (e / threshold_);
+ return (1.0);
+ }
+
+ protected:
+ float threshold_;
+ };
+
+ using ErrorFunctorPtr = typename ErrorFunctor::Ptr;
+
+ using FeatureKdTreePtr = typename KdTreeFLANN<FeatureT>::Ptr;
+ /** \brief Constructor. */
+ SampleConsensusInitialAlignment()
+ : input_features_()
+ , target_features_()
+ , nr_samples_(3)
+ , min_sample_distance_(0.0f)
+ , k_correspondences_(10)
+ , feature_tree_(new pcl::KdTreeFLANN<FeatureT>)
+ , error_functor_()
+ {
+ reg_name_ = "SampleConsensusInitialAlignment";
+ max_iterations_ = 1000;
+
+ // Setting a non-std::numeric_limits<double>::max () value to corr_dist_threshold_
+ // to make it play nicely with TruncatedError
+ corr_dist_threshold_ = 100.0f;
+ transformation_estimation_.reset(
+ new pcl::registration::TransformationEstimationSVD<PointSource, PointTarget>);
+ };
-namespace pcl
-{
- /** \brief @b SampleConsensusInitialAlignment is an implementation of the initial alignment algorithm described in
- * section IV of "Fast Point Feature Histograms (FPFH) for 3D Registration," Rusu et al.
- * \author Michael Dixon, Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename FeatureT>
- class SampleConsensusInitialAlignment : public Registration<PointSource, PointTarget>
+ /** \brief Provide a shared pointer to the source point cloud's feature descriptors
+ * \param features the source point cloud's features
+ */
+ void
+ setSourceFeatures(const FeatureCloudConstPtr& features);
+
+ /** \brief Get a pointer to the source point cloud's features */
+ inline FeatureCloudConstPtr const
+ getSourceFeatures()
+ {
+ return (input_features_);
+ }
+
+ /** \brief Provide a shared pointer to the target point cloud's feature descriptors
+ * \param features the target point cloud's features
+ */
+ void
+ setTargetFeatures(const FeatureCloudConstPtr& features);
+
+ /** \brief Get a pointer to the target point cloud's features */
+ inline FeatureCloudConstPtr const
+ getTargetFeatures()
+ {
+ return (target_features_);
+ }
+
+ /** \brief Set the minimum distances between samples
+ * \param min_sample_distance the minimum distances between samples
+ */
+ void
+ setMinSampleDistance(float min_sample_distance)
+ {
+ min_sample_distance_ = min_sample_distance;
+ }
+
+ /** \brief Get the minimum distances between samples, as set by the user */
+ float
+ getMinSampleDistance()
+ {
+ return (min_sample_distance_);
+ }
+
+ /** \brief Set the number of samples to use during each iteration
+ * \param nr_samples the number of samples to use during each iteration
+ */
+ void
+ setNumberOfSamples(int nr_samples)
+ {
+ nr_samples_ = nr_samples;
+ }
+
+ /** \brief Get the number of samples to use during each iteration, as set by the user
+ */
+ int
+ getNumberOfSamples()
{
- public:
- using Registration<PointSource, PointTarget>::reg_name_;
- using Registration<PointSource, PointTarget>::input_;
- using Registration<PointSource, PointTarget>::indices_;
- using Registration<PointSource, PointTarget>::target_;
- using Registration<PointSource, PointTarget>::final_transformation_;
- using Registration<PointSource, PointTarget>::transformation_;
- using Registration<PointSource, PointTarget>::corr_dist_threshold_;
- using Registration<PointSource, PointTarget>::min_number_correspondences_;
- using Registration<PointSource, PointTarget>::max_iterations_;
- using Registration<PointSource, PointTarget>::tree_;
- using Registration<PointSource, PointTarget>::transformation_estimation_;
- using Registration<PointSource, PointTarget>::converged_;
- using Registration<PointSource, PointTarget>::getClassName;
-
- using PointCloudSource = typename Registration<PointSource, PointTarget>::PointCloudSource;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = typename Registration<PointSource, PointTarget>::PointCloudTarget;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- using FeatureCloud = pcl::PointCloud<FeatureT>;
- using FeatureCloudPtr = typename FeatureCloud::Ptr;
- using FeatureCloudConstPtr = typename FeatureCloud::ConstPtr;
-
- using Ptr = shared_ptr<SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT> >;
- using ConstPtr = shared_ptr<const SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT> >;
-
-
- class ErrorFunctor
- {
- public:
- using Ptr = shared_ptr<ErrorFunctor>;
- using ConstPtr = shared_ptr<const ErrorFunctor>;
-
- virtual ~ErrorFunctor () = default;
- virtual float operator () (float d) const = 0;
- };
-
- class HuberPenalty : public ErrorFunctor
- {
- private:
- HuberPenalty () {}
- public:
- HuberPenalty (float threshold) : threshold_ (threshold) {}
- virtual float operator () (float e) const
- {
- if (e <= threshold_)
- return (0.5 * e*e);
- return (0.5 * threshold_ * (2.0 * std::fabs (e) - threshold_));
- }
- protected:
- float threshold_;
- };
-
- class TruncatedError : public ErrorFunctor
- {
- private:
- TruncatedError () {}
- public:
- ~TruncatedError () {}
-
- TruncatedError (float threshold) : threshold_ (threshold) {}
- float operator () (float e) const override
- {
- if (e <= threshold_)
- return (e / threshold_);
- return (1.0);
- }
- protected:
- float threshold_;
- };
-
- using ErrorFunctorPtr = typename ErrorFunctor::Ptr;
-
- using FeatureKdTreePtr = typename KdTreeFLANN<FeatureT>::Ptr;
- /** \brief Constructor. */
- SampleConsensusInitialAlignment () :
- input_features_ (), target_features_ (),
- nr_samples_(3), min_sample_distance_ (0.0f), k_correspondences_ (10),
- feature_tree_ (new pcl::KdTreeFLANN<FeatureT>),
- error_functor_ ()
- {
- reg_name_ = "SampleConsensusInitialAlignment";
- max_iterations_ = 1000;
-
- // Setting a non-std::numeric_limits<double>::max () value to corr_dist_threshold_ to make it play nicely with TruncatedError
- corr_dist_threshold_ = 100.0f;
- transformation_estimation_.reset (new pcl::registration::TransformationEstimationSVD<PointSource, PointTarget>);
- };
-
- /** \brief Provide a shared pointer to the source point cloud's feature descriptors
- * \param features the source point cloud's features
- */
- void
- setSourceFeatures (const FeatureCloudConstPtr &features);
-
- /** \brief Get a pointer to the source point cloud's features */
- inline FeatureCloudConstPtr const
- getSourceFeatures () { return (input_features_); }
-
- /** \brief Provide a shared pointer to the target point cloud's feature descriptors
- * \param features the target point cloud's features
- */
- void
- setTargetFeatures (const FeatureCloudConstPtr &features);
-
- /** \brief Get a pointer to the target point cloud's features */
- inline FeatureCloudConstPtr const
- getTargetFeatures () { return (target_features_); }
-
- /** \brief Set the minimum distances between samples
- * \param min_sample_distance the minimum distances between samples
- */
- void
- setMinSampleDistance (float min_sample_distance) { min_sample_distance_ = min_sample_distance; }
-
- /** \brief Get the minimum distances between samples, as set by the user */
- float
- getMinSampleDistance () { return (min_sample_distance_); }
-
- /** \brief Set the number of samples to use during each iteration
- * \param nr_samples the number of samples to use during each iteration
- */
- void
- setNumberOfSamples (int nr_samples) { nr_samples_ = nr_samples; }
-
- /** \brief Get the number of samples to use during each iteration, as set by the user */
- int
- getNumberOfSamples () { return (nr_samples_); }
-
- /** \brief Set the number of neighbors to use when selecting a random feature correspondence. A higher value will
- * add more randomness to the feature matching.
- * \param k the number of neighbors to use when selecting a random feature correspondence.
- */
- void
- setCorrespondenceRandomness (int k) { k_correspondences_ = k; }
-
- /** \brief Get the number of neighbors used when selecting a random feature correspondence, as set by the user */
- int
- getCorrespondenceRandomness () { return (k_correspondences_); }
-
- /** \brief Specify the error function to minimize
- * \note This call is optional. TruncatedError will be used by default
- * \param[in] error_functor a shared pointer to a subclass of SampleConsensusInitialAlignment::ErrorFunctor
- */
- void
- setErrorFunction (const ErrorFunctorPtr & error_functor) { error_functor_ = error_functor; }
-
- /** \brief Get a shared pointer to the ErrorFunctor that is to be minimized
- * \return A shared pointer to a subclass of SampleConsensusInitialAlignment::ErrorFunctor
- */
- ErrorFunctorPtr
- getErrorFunction () { return (error_functor_); }
-
- protected:
- /** \brief Choose a random index between 0 and n-1
- * \param n the number of possible indices to choose from
- */
- inline int
- getRandomIndex (int n) { return (static_cast<int> (n * (rand () / (RAND_MAX + 1.0)))); };
-
- /** \brief Select \a nr_samples sample points from cloud while making sure that their pairwise distances are
- * greater than a user-defined minimum distance, \a min_sample_distance.
- * \param cloud the input point cloud
- * \param nr_samples the number of samples to select
- * \param min_sample_distance the minimum distance between any two samples
- * \param sample_indices the resulting sample indices
- */
- void
- selectSamples (const PointCloudSource &cloud, int nr_samples, float min_sample_distance,
- std::vector<int> &sample_indices);
-
- /** \brief For each of the sample points, find a list of points in the target cloud whose features are similar to
- * the sample points' features. From these, select one randomly which will be considered that sample point's
- * correspondence.
- * \param input_features a cloud of feature descriptors
- * \param sample_indices the indices of each sample point
- * \param corresponding_indices the resulting indices of each sample's corresponding point in the target cloud
- */
- void
- findSimilarFeatures (const FeatureCloud &input_features, const std::vector<int> &sample_indices,
- std::vector<int> &corresponding_indices);
-
- /** \brief An error metric for that computes the quality of the alignment between the given cloud and the target.
- * \param cloud the input cloud
- * \param threshold distances greater than this value are capped
- */
- float
- computeErrorMetric (const PointCloudSource &cloud, float threshold);
-
- /** \brief Rigid transformation computation method.
- * \param output the transformed input point cloud dataset using the rigid transformation found
- * \param guess The computed transforamtion
- */
- void
- computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess) override;
-
- /** \brief The source point cloud's feature descriptors. */
- FeatureCloudConstPtr input_features_;
-
- /** \brief The target point cloud's feature descriptors. */
- FeatureCloudConstPtr target_features_;
-
- /** \brief The number of samples to use during each iteration. */
- int nr_samples_;
-
- /** \brief The minimum distances between samples. */
- float min_sample_distance_;
-
- /** \brief The number of neighbors to use when selecting a random feature correspondence. */
- int k_correspondences_;
-
- /** \brief The KdTree used to compare feature descriptors. */
- FeatureKdTreePtr feature_tree_;
-
- ErrorFunctorPtr error_functor_;
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
+ return (nr_samples_);
+ }
+
+ /** \brief Set the number of neighbors to use when selecting a random feature
+ * correspondence. A higher value will add more randomness to the feature matching.
+ * \param k the number of neighbors to use when selecting a random feature
+ * correspondence.
+ */
+ void
+ setCorrespondenceRandomness(int k)
+ {
+ k_correspondences_ = k;
+ }
+
+ /** \brief Get the number of neighbors used when selecting a random feature
+ * correspondence, as set by the user */
+ int
+ getCorrespondenceRandomness()
+ {
+ return (k_correspondences_);
+ }
+
+ /** \brief Specify the error function to minimize
+ * \note This call is optional. TruncatedError will be used by default
+ * \param[in] error_functor a shared pointer to a subclass of
+ * SampleConsensusInitialAlignment::ErrorFunctor
+ */
+ void
+ setErrorFunction(const ErrorFunctorPtr& error_functor)
+ {
+ error_functor_ = error_functor;
+ }
+
+ /** \brief Get a shared pointer to the ErrorFunctor that is to be minimized
+ * \return A shared pointer to a subclass of
+ * SampleConsensusInitialAlignment::ErrorFunctor
+ */
+ ErrorFunctorPtr
+ getErrorFunction()
+ {
+ return (error_functor_);
+ }
+
+protected:
+ /** \brief Choose a random index between 0 and n-1
+ * \param n the number of possible indices to choose from
+ */
+ inline pcl::index_t
+ getRandomIndex(int n)
+ {
+ return (static_cast<pcl::index_t>(n * (rand() / (RAND_MAX + 1.0))));
};
-}
+
+ /** \brief Select \a nr_samples sample points from cloud while making sure that their
+ * pairwise distances are greater than a user-defined minimum distance, \a
+ * min_sample_distance.
+ * \param cloud the input point cloud
+ * \param nr_samples the number of samples to select
+ * \param min_sample_distance the minimum distance between any two samples
+ * \param sample_indices the resulting sample indices
+ */
+ void
+ selectSamples(const PointCloudSource& cloud,
+ unsigned int nr_samples,
+ float min_sample_distance,
+ pcl::Indices& sample_indices);
+
+ /** \brief For each of the sample points, find a list of points in the target cloud
+ * whose features are similar to the sample points' features. From these, select one
+ * randomly which will be considered that sample point's correspondence.
+ * \param input_features a cloud of feature descriptors
+ * \param sample_indices the indices of each sample point
+ * \param corresponding_indices the resulting indices of each sample's corresponding
+ * point in the target cloud
+ */
+ void
+ findSimilarFeatures(const FeatureCloud& input_features,
+ const pcl::Indices& sample_indices,
+ pcl::Indices& corresponding_indices);
+
+ /** \brief An error metric that computes the quality of the alignment between the
+ * given cloud and the target.
+ * \param cloud the input cloud
+ * \param threshold distances greater than this value are capped
+ */
+ float
+ computeErrorMetric(const PointCloudSource& cloud, float threshold);
+
+ /** \brief Rigid transformation computation method.
+ * \param output the transformed input point cloud dataset using the rigid
+ * transformation found
+ * \param guess the initial guess for the transformation
+ */
+ void
+ computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess) override;
+
+ /** \brief The source point cloud's feature descriptors. */
+ FeatureCloudConstPtr input_features_;
+
+ /** \brief The target point cloud's feature descriptors. */
+ FeatureCloudConstPtr target_features_;
+
+ /** \brief The number of samples to use during each iteration. */
+ int nr_samples_;
+
+ /** \brief The minimum distances between samples. */
+ float min_sample_distance_;
+
+ /** \brief The number of neighbors to use when selecting a random feature
+ * correspondence. */
+ int k_correspondences_;
+
+ /** \brief The KdTree used to compare feature descriptors. */
+ FeatureKdTreePtr feature_tree_;
+
+ ErrorFunctorPtr error_functor_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace pcl
#include <pcl/registration/impl/ia_ransac.hpp>
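The SampleConsensusInitialAlignment API above (feature setters, sample count, correspondence randomness, and the pluggable ErrorFunctor) can be exercised as in the following sketch. It assumes FPFH feature clouds computed elsewhere (e.g. with pcl::FPFHEstimation) that correspond point-by-point with the input clouds; all numeric values are placeholders:

#include <pcl/memory.h>
#include <pcl/point_types.h>
#include <pcl/registration/ia_ransac.h>

// Sketch: SAC-IA initial alignment driven by FPFH descriptors.
Eigen::Matrix4f
alignWithSacIa (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target,
                const pcl::PointCloud<pcl::FPFHSignature33>::ConstPtr& source_features,
                const pcl::PointCloud<pcl::FPFHSignature33>::ConstPtr& target_features)
{
  using SacIa = pcl::SampleConsensusInitialAlignment<pcl::PointXYZ,
                                                     pcl::PointXYZ,
                                                     pcl::FPFHSignature33>;
  SacIa sac_ia;
  sac_ia.setInputSource (source);
  sac_ia.setInputTarget (target);
  sac_ia.setSourceFeatures (source_features);
  sac_ia.setTargetFeatures (target_features);
  sac_ia.setMinSampleDistance (0.05f);      // minimum distance between sampled points
  sac_ia.setNumberOfSamples (3);            // samples drawn per iteration
  sac_ia.setCorrespondenceRandomness (10);  // pick among the k nearest feature matches

  // Optional: replace the default TruncatedError with an explicit threshold.
  sac_ia.setErrorFunction (pcl::make_shared<SacIa::TruncatedError> (0.1f));

  pcl::PointCloud<pcl::PointXYZ> aligned;
  sac_ia.align (aligned);
  return sac_ia.getFinalTransformation ();
}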
#pragma once
// PCL includes
-#include <pcl/memory.h> // for dynamic_pointer_cast, pcl::make_shared, shared_ptr
+#include <pcl/registration/correspondence_estimation.h>
+#include <pcl/registration/default_convergence_criteria.h>
#include <pcl/registration/registration.h>
-#include <pcl/registration/transformation_estimation_svd.h>
#include <pcl/registration/transformation_estimation_point_to_plane_lls.h>
+#include <pcl/registration/transformation_estimation_svd.h>
#include <pcl/registration/transformation_estimation_symmetric_point_to_plane_lls.h>
-#include <pcl/registration/correspondence_estimation.h>
-#include <pcl/registration/default_convergence_criteria.h>
+#include <pcl/memory.h> // for dynamic_pointer_cast, pcl::make_shared, shared_ptr
-
-namespace pcl
-{
- /** \brief @b IterativeClosestPoint provides a base implementation of the Iterative Closest Point algorithm.
- * The transformation is estimated based on Singular Value Decomposition (SVD).
- *
- * The algorithm has several termination criteria:
- *
- * <ol>
- * <li>Number of iterations has reached the maximum user imposed number of iterations (via \ref setMaximumIterations)</li>
- * <li>The epsilon (difference) between the previous transformation and the current estimated transformation is smaller than an user imposed value (via \ref setTransformationEpsilon)</li>
- * <li>The sum of Euclidean squared errors is smaller than a user defined threshold (via \ref setEuclideanFitnessEpsilon)</li>
- * </ol>
- *
- *
- * Usage example:
- * \code
- * IterativeClosestPoint<PointXYZ, PointXYZ> icp;
- * // Set the input source and target
- * icp.setInputCloud (cloud_source);
- * icp.setInputTarget (cloud_target);
- *
- * // Set the max correspondence distance to 5cm (e.g., correspondences with higher distances will be ignored)
- * icp.setMaxCorrespondenceDistance (0.05);
- * // Set the maximum number of iterations (criterion 1)
- * icp.setMaximumIterations (50);
- * // Set the transformation epsilon (criterion 2)
- * icp.setTransformationEpsilon (1e-8);
- * // Set the euclidean distance difference epsilon (criterion 3)
- * icp.setEuclideanFitnessEpsilon (1);
- *
- * // Perform the alignment
- * icp.align (cloud_source_registered);
- *
- * // Obtain the transformation that aligned cloud_source to cloud_source_registered
- * Eigen::Matrix4f transformation = icp.getFinalTransformation ();
- * \endcode
- *
- * \author Radu B. Rusu, Michael Dixon
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class IterativeClosestPoint : public Registration<PointSource, PointTarget, Scalar>
+namespace pcl {
+/** \brief @b IterativeClosestPoint provides a base implementation of the Iterative
+ * Closest Point algorithm. The transformation is estimated based on Singular Value
+ * Decomposition (SVD).
+ *
+ * The algorithm has several termination criteria:
+ *
+ * <ol>
+ * <li>Number of iterations has reached the maximum user imposed number of iterations
+ * (via \ref setMaximumIterations)</li> <li>The epsilon (difference) between the
+ * previous transformation and the current estimated transformation is smaller than a
+ * user imposed value (via \ref setTransformationEpsilon)</li> <li>The sum of Euclidean
+ * squared errors is smaller than a user defined threshold (via \ref
+ * setEuclideanFitnessEpsilon)</li>
+ * </ol>
+ *
+ *
+ * Usage example:
+ * \code
+ * IterativeClosestPoint<PointXYZ, PointXYZ> icp;
+ * // Set the input source and target
+ * icp.setInputSource (cloud_source);
+ * icp.setInputTarget (cloud_target);
+ *
+ * // Set the max correspondence distance to 5cm (e.g., correspondences with higher
+ * // distances will be ignored)
+ * icp.setMaxCorrespondenceDistance (0.05);
+ * // Set the maximum number of iterations (criterion 1)
+ * icp.setMaximumIterations (50);
+ * // Set the transformation epsilon (criterion 2)
+ * icp.setTransformationEpsilon (1e-8);
+ * // Set the euclidean distance difference epsilon (criterion 3)
+ * icp.setEuclideanFitnessEpsilon (1);
+ *
+ * // Perform the alignment
+ * icp.align (cloud_source_registered);
+ *
+ * // Obtain the transformation that aligned cloud_source to cloud_source_registered
+ * Eigen::Matrix4f transformation = icp.getFinalTransformation ();
+ * \endcode
+ *
+ * \author Radu B. Rusu, Michael Dixon
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class IterativeClosestPoint : public Registration<PointSource, PointTarget, Scalar> {
+public:
+ using PointCloudSource =
+ typename Registration<PointSource, PointTarget, Scalar>::PointCloudSource;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget =
+ typename Registration<PointSource, PointTarget, Scalar>::PointCloudTarget;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+ using Ptr = shared_ptr<IterativeClosestPoint<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const IterativeClosestPoint<PointSource, PointTarget, Scalar>>;
+
+ using Registration<PointSource, PointTarget, Scalar>::reg_name_;
+ using Registration<PointSource, PointTarget, Scalar>::getClassName;
+ using Registration<PointSource, PointTarget, Scalar>::input_;
+ using Registration<PointSource, PointTarget, Scalar>::indices_;
+ using Registration<PointSource, PointTarget, Scalar>::target_;
+ using Registration<PointSource, PointTarget, Scalar>::nr_iterations_;
+ using Registration<PointSource, PointTarget, Scalar>::max_iterations_;
+ using Registration<PointSource, PointTarget, Scalar>::previous_transformation_;
+ using Registration<PointSource, PointTarget, Scalar>::final_transformation_;
+ using Registration<PointSource, PointTarget, Scalar>::transformation_;
+ using Registration<PointSource, PointTarget, Scalar>::transformation_epsilon_;
+ using Registration<PointSource, PointTarget, Scalar>::
+ transformation_rotation_epsilon_;
+ using Registration<PointSource, PointTarget, Scalar>::converged_;
+ using Registration<PointSource, PointTarget, Scalar>::corr_dist_threshold_;
+ using Registration<PointSource, PointTarget, Scalar>::inlier_threshold_;
+ using Registration<PointSource, PointTarget, Scalar>::min_number_correspondences_;
+ using Registration<PointSource, PointTarget, Scalar>::update_visualizer_;
+ using Registration<PointSource, PointTarget, Scalar>::euclidean_fitness_epsilon_;
+ using Registration<PointSource, PointTarget, Scalar>::correspondences_;
+ using Registration<PointSource, PointTarget, Scalar>::transformation_estimation_;
+ using Registration<PointSource, PointTarget, Scalar>::correspondence_estimation_;
+ using Registration<PointSource, PointTarget, Scalar>::correspondence_rejectors_;
+
+ typename pcl::registration::DefaultConvergenceCriteria<Scalar>::Ptr
+ convergence_criteria_;
+ using Matrix4 = typename Registration<PointSource, PointTarget, Scalar>::Matrix4;
+
+ /** \brief Empty constructor. */
+ IterativeClosestPoint()
+ : x_idx_offset_(0)
+ , y_idx_offset_(0)
+ , z_idx_offset_(0)
+ , nx_idx_offset_(0)
+ , ny_idx_offset_(0)
+ , nz_idx_offset_(0)
+ , use_reciprocal_correspondence_(false)
+ , source_has_normals_(false)
+ , target_has_normals_(false)
{
- public:
- using PointCloudSource = typename Registration<PointSource, PointTarget, Scalar>::PointCloudSource;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = typename Registration<PointSource, PointTarget, Scalar>::PointCloudTarget;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- using Ptr = shared_ptr<IterativeClosestPoint<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const IterativeClosestPoint<PointSource, PointTarget, Scalar> >;
-
- using Registration<PointSource, PointTarget, Scalar>::reg_name_;
- using Registration<PointSource, PointTarget, Scalar>::getClassName;
- using Registration<PointSource, PointTarget, Scalar>::input_;
- using Registration<PointSource, PointTarget, Scalar>::indices_;
- using Registration<PointSource, PointTarget, Scalar>::target_;
- using Registration<PointSource, PointTarget, Scalar>::nr_iterations_;
- using Registration<PointSource, PointTarget, Scalar>::max_iterations_;
- using Registration<PointSource, PointTarget, Scalar>::previous_transformation_;
- using Registration<PointSource, PointTarget, Scalar>::final_transformation_;
- using Registration<PointSource, PointTarget, Scalar>::transformation_;
- using Registration<PointSource, PointTarget, Scalar>::transformation_epsilon_;
- using Registration<PointSource, PointTarget, Scalar>::transformation_rotation_epsilon_;
- using Registration<PointSource, PointTarget, Scalar>::converged_;
- using Registration<PointSource, PointTarget, Scalar>::corr_dist_threshold_;
- using Registration<PointSource, PointTarget, Scalar>::inlier_threshold_;
- using Registration<PointSource, PointTarget, Scalar>::min_number_correspondences_;
- using Registration<PointSource, PointTarget, Scalar>::update_visualizer_;
- using Registration<PointSource, PointTarget, Scalar>::euclidean_fitness_epsilon_;
- using Registration<PointSource, PointTarget, Scalar>::correspondences_;
- using Registration<PointSource, PointTarget, Scalar>::transformation_estimation_;
- using Registration<PointSource, PointTarget, Scalar>::correspondence_estimation_;
- using Registration<PointSource, PointTarget, Scalar>::correspondence_rejectors_;
-
- typename pcl::registration::DefaultConvergenceCriteria<Scalar>::Ptr convergence_criteria_;
- using Matrix4 = typename Registration<PointSource, PointTarget, Scalar>::Matrix4;
-
- /** \brief Empty constructor. */
- IterativeClosestPoint ()
- : x_idx_offset_ (0)
- , y_idx_offset_ (0)
- , z_idx_offset_ (0)
- , nx_idx_offset_ (0)
- , ny_idx_offset_ (0)
- , nz_idx_offset_ (0)
- , use_reciprocal_correspondence_ (false)
- , source_has_normals_ (false)
- , target_has_normals_ (false)
- {
- reg_name_ = "IterativeClosestPoint";
- transformation_estimation_.reset (new pcl::registration::TransformationEstimationSVD<PointSource, PointTarget, Scalar> ());
- correspondence_estimation_.reset (new pcl::registration::CorrespondenceEstimation<PointSource, PointTarget, Scalar>);
- convergence_criteria_.reset(new pcl::registration::DefaultConvergenceCriteria<Scalar> (nr_iterations_, transformation_, *correspondences_));
- };
-
- /** \brief Empty destructor */
- ~IterativeClosestPoint () {}
-
- /** \brief Returns a pointer to the DefaultConvergenceCriteria used by the IterativeClosestPoint class.
- * This allows to check the convergence state after the align() method as well as to configure
- * DefaultConvergenceCriteria's parameters not available through the ICP API before the align()
- * method is called. Please note that the align method sets max_iterations_,
- * euclidean_fitness_epsilon_ and transformation_epsilon_ and therefore overrides the default / set
- * values of the DefaultConvergenceCriteria instance.
- * \return Pointer to the IterativeClosestPoint's DefaultConvergenceCriteria.
- */
- inline typename pcl::registration::DefaultConvergenceCriteria<Scalar>::Ptr
- getConvergeCriteria ()
- {
- return convergence_criteria_;
- }
-
- /** \brief Provide a pointer to the input source
- * (e.g., the point cloud that we want to align to the target)
- *
- * \param[in] cloud the input point cloud source
- */
- void
- setInputSource (const PointCloudSourceConstPtr &cloud) override
- {
- Registration<PointSource, PointTarget, Scalar>::setInputSource (cloud);
- const auto fields = pcl::getFields<PointSource> ();
- source_has_normals_ = false;
- for (const auto &field : fields)
- {
- if (field.name == "x") x_idx_offset_ = field.offset;
- else if (field.name == "y") y_idx_offset_ = field.offset;
- else if (field.name == "z") z_idx_offset_ = field.offset;
- else if (field.name == "normal_x")
- {
- source_has_normals_ = true;
- nx_idx_offset_ = field.offset;
- }
- else if (field.name == "normal_y")
- {
- source_has_normals_ = true;
- ny_idx_offset_ = field.offset;
- }
- else if (field.name == "normal_z")
- {
- source_has_normals_ = true;
- nz_idx_offset_ = field.offset;
- }
- }
- }
-
- /** \brief Provide a pointer to the input target
- * (e.g., the point cloud that we want to align to the target)
- *
- * \param[in] cloud the input point cloud target
- */
- void
- setInputTarget (const PointCloudTargetConstPtr &cloud) override
- {
- Registration<PointSource, PointTarget, Scalar>::setInputTarget (cloud);
- const auto fields = pcl::getFields<PointSource> ();
- target_has_normals_ = false;
- for (const auto &field : fields)
- {
- if (field.name == "normal_x" || field.name == "normal_y" || field.name == "normal_z")
- {
- target_has_normals_ = true;
- break;
- }
- }
- }
-
- /** \brief Set whether to use reciprocal correspondence or not
- *
- * \param[in] use_reciprocal_correspondence whether to use reciprocal correspondence or not
- */
- inline void
- setUseReciprocalCorrespondences (bool use_reciprocal_correspondence)
- {
- use_reciprocal_correspondence_ = use_reciprocal_correspondence;
- }
-
- /** \brief Obtain whether reciprocal correspondence are used or not */
- inline bool
- getUseReciprocalCorrespondences () const
- {
- return (use_reciprocal_correspondence_);
- }
-
- protected:
-
- /** \brief Apply a rigid transform to a given dataset. Here we check whether whether
- * the dataset has surface normals in addition to XYZ, and rotate normals as well.
- * \param[in] input the input point cloud
- * \param[out] output the resultant output point cloud
- * \param[in] transform a 4x4 rigid transformation
- * \note Can be used with cloud_in equal to cloud_out
- */
- virtual void
- transformCloud (const PointCloudSource &input,
- PointCloudSource &output,
- const Matrix4 &transform);
-
- /** \brief Rigid transformation computation method with initial guess.
- * \param output the transformed input point cloud dataset using the rigid transformation found
- * \param guess the initial guess of the transformation to compute
- */
- void
- computeTransformation (PointCloudSource &output, const Matrix4 &guess) override;
-
- /** \brief Looks at the Estimators and Rejectors and determines whether their blob-setter methods need to be called */
- virtual void
- determineRequiredBlobData ();
-
- /** \brief XYZ fields offset. */
- std::size_t x_idx_offset_, y_idx_offset_, z_idx_offset_;
-
- /** \brief Normal fields offset. */
- std::size_t nx_idx_offset_, ny_idx_offset_, nz_idx_offset_;
-
- /** \brief The correspondence type used for correspondence estimation. */
- bool use_reciprocal_correspondence_;
-
- /** \brief Internal check whether source dataset has normals or not. */
- bool source_has_normals_;
- /** \brief Internal check whether target dataset has normals or not. */
- bool target_has_normals_;
-
- /** \brief Checks for whether estimators and rejectors need various data */
- bool need_source_blob_, need_target_blob_;
+ reg_name_ = "IterativeClosestPoint";
+ transformation_estimation_.reset(
+ new pcl::registration::
+ TransformationEstimationSVD<PointSource, PointTarget, Scalar>());
+ correspondence_estimation_.reset(
+ new pcl::registration::
+ CorrespondenceEstimation<PointSource, PointTarget, Scalar>);
+ convergence_criteria_.reset(
+ new pcl::registration::DefaultConvergenceCriteria<Scalar>(
+ nr_iterations_, transformation_, *correspondences_));
};
- /** \brief @b IterativeClosestPointWithNormals is a special case of
- * IterativeClosestPoint, that uses a transformation estimated based on
- * Point to Plane distances by default.
- *
- * By default, this implementation uses the traditional point to plane objective
- * and computes point to plane distances using the normals of the target point
- * cloud. It also provides the option (through setUseSymmetricObjective) of
- * using the symmetric objective function of [Rusinkiewicz 2019]. This objective
- * uses the normals of both the source and target point cloud and has a similar
- * computational cost to the traditional point to plane objective while also
- * offering improved convergence speed and a wider basin of convergence.
- *
- * Note that this implementation not demean the point clouds which can lead
- * to increased numerical error. If desired, a user can demean the point cloud,
- * run iterative closest point, and composite the resulting ICP transformation
- * with the translations from demeaning to obtain a transformation between
- * the original point clouds.
- *
- * \author Radu B. Rusu, Matthew Cong
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class IterativeClosestPointWithNormals : public IterativeClosestPoint<PointSource, PointTarget, Scalar>
+ /**
+ * \brief Due to `convergence_criteria_` holding references to the class members,
+ * it is tricky to implement its copy and move operations correctly. This
+ * can result in subtle bugs, and to prevent them, these operations have been
+ * disabled for ICP.
+ *
+ * \todo: remove deleted ctors and assignments operations after resolving the issue
+ */
+ IterativeClosestPoint(const IterativeClosestPoint&) = delete;
+ IterativeClosestPoint(IterativeClosestPoint&&) = delete;
+ IterativeClosestPoint&
+ operator=(const IterativeClosestPoint&) = delete;
+ IterativeClosestPoint&
+ operator=(IterativeClosestPoint&&) = delete;
+
+ /** \brief Empty destructor */
+ ~IterativeClosestPoint() {}
+
+ /** \brief Returns a pointer to the DefaultConvergenceCriteria used by the
+ * IterativeClosestPoint class. This allows checking the convergence state after the
+ * align() method as well as configuring DefaultConvergenceCriteria parameters that
+ * are not available through the ICP API before the align() method is called. Please
+ * note that the align() method sets max_iterations_, euclidean_fitness_epsilon_ and
+ * transformation_epsilon_ and therefore overrides the default / set values of the
+ * DefaultConvergenceCriteria instance.
+ * \return Pointer to the IterativeClosestPoint's DefaultConvergenceCriteria.
+ */
+ inline typename pcl::registration::DefaultConvergenceCriteria<Scalar>::Ptr
+ getConvergeCriteria()
+ {
+ return convergence_criteria_;
+ }
+
+ /** \brief Provide a pointer to the input source
+ * (e.g., the point cloud that we want to align to the target)
+ *
+ * \param[in] cloud the input point cloud source
+ */
+ void
+ setInputSource(const PointCloudSourceConstPtr& cloud) override
{
- public:
- using PointCloudSource = typename IterativeClosestPoint<PointSource, PointTarget, Scalar>::PointCloudSource;
- using PointCloudTarget = typename IterativeClosestPoint<PointSource, PointTarget, Scalar>::PointCloudTarget;
- using Matrix4 = typename IterativeClosestPoint<PointSource, PointTarget, Scalar>::Matrix4;
-
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::reg_name_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformation_estimation_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::correspondence_rejectors_;
-
- using Ptr = shared_ptr<IterativeClosestPoint<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const IterativeClosestPoint<PointSource, PointTarget, Scalar> >;
-
- /** \brief Empty constructor. */
- IterativeClosestPointWithNormals ()
- {
- reg_name_ = "IterativeClosestPointWithNormals";
- setUseSymmetricObjective (false);
- setEnforceSameDirectionNormals (true);
- //correspondence_rejectors_.add
- };
-
- /** \brief Empty destructor */
- virtual ~IterativeClosestPointWithNormals () {}
-
- /** \brief Set whether to use a symmetric objective function or not
- *
- * \param[in] use_symmetric_objective whether to use a symmetric objective function or not
- */
- inline void
- setUseSymmetricObjective (bool use_symmetric_objective)
- {
- use_symmetric_objective_ = use_symmetric_objective;
- if (use_symmetric_objective_)
- {
- auto symmetric_transformation_estimation = pcl::make_shared<pcl::registration::TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar> > ();
- symmetric_transformation_estimation->setEnforceSameDirectionNormals (enforce_same_direction_normals_);
- transformation_estimation_ = symmetric_transformation_estimation;
- }
- else
- {
- transformation_estimation_.reset (new pcl::registration::TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar> ());
- }
+ Registration<PointSource, PointTarget, Scalar>::setInputSource(cloud);
+ const auto fields = pcl::getFields<PointSource>();
+ source_has_normals_ = false;
+ for (const auto& field : fields) {
+ if (field.name == "x")
+ x_idx_offset_ = field.offset;
+ else if (field.name == "y")
+ y_idx_offset_ = field.offset;
+ else if (field.name == "z")
+ z_idx_offset_ = field.offset;
+ else if (field.name == "normal_x") {
+ source_has_normals_ = true;
+ nx_idx_offset_ = field.offset;
}
-
- /** \brief Obtain whether a symmetric objective is used or not */
- inline bool
- getUseSymmetricObjective () const
- {
- return use_symmetric_objective_;
+ else if (field.name == "normal_y") {
+ source_has_normals_ = true;
+ ny_idx_offset_ = field.offset;
}
-
- /** \brief Set whether or not to negate source or target normals on a per-point basis such that they point in the same direction. Only applicable to the symmetric objective function.
- *
- * \param[in] enforce_same_direction_normals whether to negate source or target normals on a per-point basis such that they point in the same direction.
- */
- inline void
- setEnforceSameDirectionNormals (bool enforce_same_direction_normals)
- {
- enforce_same_direction_normals_ = enforce_same_direction_normals;
- auto symmetric_transformation_estimation = dynamic_pointer_cast<pcl::registration::TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar> >(transformation_estimation_);
- if (symmetric_transformation_estimation)
- symmetric_transformation_estimation->setEnforceSameDirectionNormals (enforce_same_direction_normals_);
+ else if (field.name == "normal_z") {
+ source_has_normals_ = true;
+ nz_idx_offset_ = field.offset;
}
-
- /** \brief Obtain whether source or target normals are negated on a per-point basis such that they point in the same direction or not */
- inline bool
- getEnforceSameDirectionNormals () const
- {
- return enforce_same_direction_normals_;
+ }
+ }
+
+ /** \brief Provide a pointer to the input target
+ * (e.g., the point cloud to which we want to align the input source)
+ *
+ * \param[in] cloud the input point cloud target
+ */
+ void
+ setInputTarget(const PointCloudTargetConstPtr& cloud) override
+ {
+ Registration<PointSource, PointTarget, Scalar>::setInputTarget(cloud);
+ const auto fields = pcl::getFields<PointSource>();
+ target_has_normals_ = false;
+ for (const auto& field : fields) {
+ if (field.name == "normal_x" || field.name == "normal_y" ||
+ field.name == "normal_z") {
+ target_has_normals_ = true;
+ break;
}
+ }
+ }
+
+ /** \brief Set whether to use reciprocal correspondences or not
+ *
+ * \param[in] use_reciprocal_correspondence whether to use reciprocal correspondences
+ * or not
+ */
+ inline void
+ setUseReciprocalCorrespondences(bool use_reciprocal_correspondence)
+ {
+ use_reciprocal_correspondence_ = use_reciprocal_correspondence;
+ }
- protected:
-
- /** \brief Apply a rigid transform to a given dataset
- * \param[in] input the input point cloud
- * \param[out] output the resultant output point cloud
- * \param[in] transform a 4x4 rigid transformation
- * \note Can be used with cloud_in equal to cloud_out
- */
- virtual void
- transformCloud (const PointCloudSource &input,
- PointCloudSource &output,
- const Matrix4 &transform);
-
- /** \brief Type of objective function (asymmetric vs. symmetric) used for transform estimation */
- bool use_symmetric_objective_;
- /** \brief Whether or not to negate source and/or target normals such that they point in the same direction in the symmetric objective function */
- bool enforce_same_direction_normals_;
+ /** \brief Obtain whether reciprocal correspondences are used or not */
+ inline bool
+ getUseReciprocalCorrespondences() const
+ {
+ return (use_reciprocal_correspondence_);
+ }
+
+protected:
+ /** \brief Apply a rigid transform to a given dataset. Here we check whether
+ * the dataset has surface normals in addition to XYZ, and rotate normals as well.
+ * \param[in] input the input point cloud
+ * \param[out] output the resultant output point cloud
+ * \param[in] transform a 4x4 rigid transformation
+ * \note Can be used with cloud_in equal to cloud_out
+ */
+ virtual void
+ transformCloud(const PointCloudSource& input,
+ PointCloudSource& output,
+ const Matrix4& transform);
+
+ /** \brief Rigid transformation computation method with initial guess.
+ * \param output the transformed input point cloud dataset using the rigid
+ * transformation found
+ * \param guess the initial guess of the transformation to compute
+ */
+ void
+ computeTransformation(PointCloudSource& output, const Matrix4& guess) override;
+
+ /** \brief Looks at the Estimators and Rejectors and determines whether their
+ * blob-setter methods need to be called */
+ virtual void
+ determineRequiredBlobData();
+
+ /** \brief XYZ fields offset. */
+ std::size_t x_idx_offset_, y_idx_offset_, z_idx_offset_;
+
+ /** \brief Normal fields offset. */
+ std::size_t nx_idx_offset_, ny_idx_offset_, nz_idx_offset_;
+
+ /** \brief The correspondence type used for correspondence estimation. */
+ bool use_reciprocal_correspondence_;
+
+ /** \brief Internal check whether source dataset has normals or not. */
+ bool source_has_normals_;
+ /** \brief Internal check whether target dataset has normals or not. */
+ bool target_has_normals_;
+
+ /** \brief Checks for whether estimators and rejectors need various data */
+ bool need_source_blob_, need_target_blob_;
+};
+
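The getConvergeCriteria() accessor exposed above lets the caller inspect why the last align() call stopped. A minimal sketch, assuming DefaultConvergenceCriteria provides getConvergenceState() and the CONVERGENCE_CRITERIA_* enumerators (they live in default_convergence_criteria.h, not in the header shown here):

#include <pcl/point_types.h>
#include <pcl/registration/icp.h>

// Sketch: run ICP and check whether it stopped on the iteration cap. The input
// clouds are assumed to be prepared elsewhere.
bool
stoppedOnIterationCap (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                       const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target)
{
  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
  icp.setInputSource (source);
  icp.setInputTarget (target);

  pcl::PointCloud<pcl::PointXYZ> registered;
  icp.align (registered);

  auto criteria = icp.getConvergeCriteria ();
  return (criteria->getConvergenceState () ==
          pcl::registration::DefaultConvergenceCriteria<float>::CONVERGENCE_CRITERIA_ITERATIONS);
}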
+/** \brief @b IterativeClosestPointWithNormals is a special case of
+ * IterativeClosestPoint, that uses a transformation estimated based on
+ * Point to Plane distances by default.
+ *
+ * By default, this implementation uses the traditional point to plane objective
+ * and computes point to plane distances using the normals of the target point
+ * cloud. It also provides the option (through setUseSymmetricObjective) of
+ * using the symmetric objective function of [Rusinkiewicz 2019]. This objective
+ * uses the normals of both the source and target point cloud and has a similar
+ * computational cost to the traditional point to plane objective while also
+ * offering improved convergence speed and a wider basin of convergence.
+ *
+ * Note that this implementation does not demean the point clouds, which can lead
+ * to increased numerical error. If desired, a user can demean the point cloud,
+ * run iterative closest point, and composite the resulting ICP transformation
+ * with the translations from demeaning to obtain a transformation between
+ * the original point clouds.
+ *
+ * \author Radu B. Rusu, Matthew Cong
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class IterativeClosestPointWithNormals
+: public IterativeClosestPoint<PointSource, PointTarget, Scalar> {
+public:
+ using PointCloudSource = typename IterativeClosestPoint<PointSource,
+ PointTarget,
+ Scalar>::PointCloudSource;
+ using PointCloudTarget = typename IterativeClosestPoint<PointSource,
+ PointTarget,
+ Scalar>::PointCloudTarget;
+ using Matrix4 =
+ typename IterativeClosestPoint<PointSource, PointTarget, Scalar>::Matrix4;
+
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::reg_name_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ transformation_estimation_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ correspondence_rejectors_;
+
+ using Ptr = shared_ptr<IterativeClosestPoint<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const IterativeClosestPoint<PointSource, PointTarget, Scalar>>;
+
+ /** \brief Empty constructor. */
+ IterativeClosestPointWithNormals()
+ {
+ reg_name_ = "IterativeClosestPointWithNormals";
+ setUseSymmetricObjective(false);
+ setEnforceSameDirectionNormals(true);
+ // correspondence_rejectors_.add
};
-}
+ /** \brief Empty destructor */
+ virtual ~IterativeClosestPointWithNormals() {}
+
+ /** \brief Set whether to use a symmetric objective function or not
+ *
+ * \param[in] use_symmetric_objective whether to use a symmetric objective function or
+ * not
+ */
+ inline void
+ setUseSymmetricObjective(bool use_symmetric_objective)
+ {
+ use_symmetric_objective_ = use_symmetric_objective;
+ if (use_symmetric_objective_) {
+ auto symmetric_transformation_estimation = pcl::make_shared<
+ pcl::registration::TransformationEstimationSymmetricPointToPlaneLLS<
+ PointSource,
+ PointTarget,
+ Scalar>>();
+ symmetric_transformation_estimation->setEnforceSameDirectionNormals(
+ enforce_same_direction_normals_);
+ transformation_estimation_ = symmetric_transformation_estimation;
+ }
+ else {
+ transformation_estimation_.reset(
+ new pcl::registration::TransformationEstimationPointToPlaneLLS<PointSource,
+ PointTarget,
+ Scalar>());
+ }
+ }
+
+ /** \brief Obtain whether a symmetric objective is used or not */
+ inline bool
+ getUseSymmetricObjective() const
+ {
+ return use_symmetric_objective_;
+ }
+
+ /** \brief Set whether or not to negate source or target normals on a per-point basis
+ * such that they point in the same direction. Only applicable to the symmetric
+ * objective function.
+ *
+ * \param[in] enforce_same_direction_normals whether to negate source or target
+ * normals on a per-point basis such that they point in the same direction.
+ */
+ inline void
+ setEnforceSameDirectionNormals(bool enforce_same_direction_normals)
+ {
+ enforce_same_direction_normals_ = enforce_same_direction_normals;
+ auto symmetric_transformation_estimation = dynamic_pointer_cast<
+ pcl::registration::TransformationEstimationSymmetricPointToPlaneLLS<PointSource,
+ PointTarget,
+ Scalar>>(
+ transformation_estimation_);
+ if (symmetric_transformation_estimation)
+ symmetric_transformation_estimation->setEnforceSameDirectionNormals(
+ enforce_same_direction_normals_);
+ }
+
+ /** \brief Obtain whether source or target normals are negated on a per-point basis
+ * such that they point in the same direction or not */
+ inline bool
+ getEnforceSameDirectionNormals() const
+ {
+ return enforce_same_direction_normals_;
+ }
+
+protected:
+ /** \brief Apply a rigid transform to a given dataset
+ * \param[in] input the input point cloud
+ * \param[out] output the resultant output point cloud
+ * \param[in] transform a 4x4 rigid transformation
+ * \note Can be used with cloud_in equal to cloud_out
+ */
+ virtual void
+ transformCloud(const PointCloudSource& input,
+ PointCloudSource& output,
+ const Matrix4& transform);
+
+ /** \brief Type of objective function (asymmetric vs. symmetric) used for transform
+ * estimation */
+ bool use_symmetric_objective_;
+ /** \brief Whether or not to negate source and/or target normals such that they point
+ * in the same direction in the symmetric objective function */
+ bool enforce_same_direction_normals_;
+};
+
+} // namespace pcl
#include <pcl/registration/impl/icp.hpp>
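IterativeClosestPointWithNormals toggles between the traditional point-to-plane objective and the symmetric objective of [Rusinkiewicz 2019] via setUseSymmetricObjective(). A minimal sketch using only the setters shown above, assuming both clouds already carry normals (e.g. estimated beforehand with pcl::NormalEstimation); parameter values are placeholders:

#include <pcl/point_types.h>
#include <pcl/registration/icp.h>

// Sketch: point-to-plane ICP with the symmetric objective enabled.
Eigen::Matrix4f
alignPointToPlaneSymmetric (const pcl::PointCloud<pcl::PointNormal>::ConstPtr& source,
                            const pcl::PointCloud<pcl::PointNormal>::ConstPtr& target)
{
  pcl::IterativeClosestPointWithNormals<pcl::PointNormal, pcl::PointNormal> icp;
  icp.setInputSource (source);
  icp.setInputTarget (target);
  icp.setUseSymmetricObjective (true);        // symmetric point-to-plane objective
  icp.setEnforceSameDirectionNormals (true);  // flip normals so pairs point the same way
  icp.setMaxCorrespondenceDistance (0.05);
  icp.setMaximumIterations (50);

  pcl::PointCloud<pcl::PointNormal> registered;
  icp.align (registered);
  return icp.getFinalTransformation ();
}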
#include <pcl/registration/icp.h>
#include <pcl/registration/transformation_estimation_lm.h>
-namespace pcl
-{
- /** \brief @b IterativeClosestPointNonLinear is an ICP variant that uses Levenberg-Marquardt optimization
- * backend. The resultant transformation is optimized as a quaternion.
- *
- * The algorithm has several termination criteria:
- *
- * <ol>
- * <li>Number of iterations has reached the maximum user imposed number of iterations
- * (via \ref setMaximumIterations)</li>
- * <li>The epsilon (difference) between the previous transformation and the current estimated transformation is
- * smaller than an user imposed value (via \ref setTransformationEpsilon)</li>
- * <li>The sum of Euclidean squared errors is smaller than a user defined threshold
- * (via \ref setEuclideanFitnessEpsilon)</li>
- * </ol>
- *
- * \author Radu B. Rusu, Michael Dixon
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class IterativeClosestPointNonLinear : public IterativeClosestPoint<PointSource, PointTarget, Scalar>
- {
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::min_number_correspondences_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::reg_name_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformation_estimation_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::computeTransformation;
-
- public:
+namespace pcl {
+/** \brief @b IterativeClosestPointNonLinear is an ICP variant that uses a
+ * Levenberg-Marquardt optimization backend. The resultant transformation is optimized
+ * as a quaternion.
+ *
+ * The algorithm has several termination criteria:
+ *
+ * <ol>
+ * <li>Number of iterations has reached the maximum user imposed number of iterations
+ * (via \ref setMaximumIterations)</li>
+ * <li>The epsilon (difference) between the previous transformation and the current
+ * estimated transformation is smaller than a user imposed value (via \ref
+ * setTransformationEpsilon)</li> <li>The sum of Euclidean squared errors is smaller
+ * than a user defined threshold (via \ref setEuclideanFitnessEpsilon)</li>
+ * </ol>
+ *
+ * \author Radu B. Rusu, Michael Dixon
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class IterativeClosestPointNonLinear
+: public IterativeClosestPoint<PointSource, PointTarget, Scalar> {
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ min_number_correspondences_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::reg_name_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ transformation_estimation_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::computeTransformation;
- using Ptr = shared_ptr< IterativeClosestPointNonLinear<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr< const IterativeClosestPointNonLinear<PointSource, PointTarget, Scalar> >;
+public:
+ using Ptr =
+ shared_ptr<IterativeClosestPointNonLinear<PointSource, PointTarget, Scalar>>;
+ using ConstPtr = shared_ptr<
+ const IterativeClosestPointNonLinear<PointSource, PointTarget, Scalar>>;
- using Matrix4 = typename Registration<PointSource, PointTarget, Scalar>::Matrix4;
+ using Matrix4 = typename Registration<PointSource, PointTarget, Scalar>::Matrix4;
- /** \brief Empty constructor. */
- IterativeClosestPointNonLinear ()
- {
- min_number_correspondences_ = 4;
- reg_name_ = "IterativeClosestPointNonLinear";
+ /** \brief Empty constructor. */
+ IterativeClosestPointNonLinear()
+ {
+ min_number_correspondences_ = 4;
+ reg_name_ = "IterativeClosestPointNonLinear";
- transformation_estimation_.reset (new pcl::registration::TransformationEstimationLM<PointSource, PointTarget, Scalar>);
- }
- };
-}
+ transformation_estimation_.reset(
+ new pcl::registration::
+ TransformationEstimationLM<PointSource, PointTarget, Scalar>);
+ }
+};
+} // namespace pcl
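Since IterativeClosestPointNonLinear only swaps the transformation estimation for TransformationEstimationLM, it is used exactly like IterativeClosestPoint. A minimal sketch with placeholder parameter values; the setters come from Registration:

#include <pcl/point_types.h>
#include <pcl/registration/icp_nl.h>

// Sketch: Levenberg-Marquardt based ICP as a drop-in replacement for the SVD variant.
Eigen::Matrix4f
alignWithLmIcp (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target)
{
  pcl::IterativeClosestPointNonLinear<pcl::PointXYZ, pcl::PointXYZ> icp_nl;
  icp_nl.setInputSource (source);
  icp_nl.setInputTarget (target);
  icp_nl.setMaxCorrespondenceDistance (0.1);
  icp_nl.setTransformationEpsilon (1e-8);

  pcl::PointCloud<pcl::PointXYZ> registered;
  icp_nl.align (registered);
  return icp_nl.getFinalTransformation ();
}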
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_ESTIMATION_H_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_ESTIMATION_H_
-#include <pcl/common/io.h>
#include <pcl/common/copy_point.h>
+#include <pcl/common/io.h>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> void
-CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::setInputTarget (
- const PointCloudTargetConstPtr &cloud)
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::setInputTarget(
+ const PointCloudTargetConstPtr& cloud)
{
- if (cloud->points.empty ())
- {
- PCL_ERROR ("[pcl::registration::%s::setInputTarget] Invalid or empty point cloud dataset given!\n", getClassName ().c_str ());
+ if (cloud->points.empty()) {
+ PCL_ERROR("[pcl::registration::%s::setInputTarget] Invalid or empty point cloud "
+ "dataset given!\n",
+ getClassName().c_str());
return;
}
target_ = cloud;
// Set the internal point representation of choice
if (point_representation_)
- tree_->setPointRepresentation (point_representation_);
+ tree_->setPointRepresentation(point_representation_);
target_cloud_updated_ = true;
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> bool
-CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute ()
+template <typename PointSource, typename PointTarget, typename Scalar>
+bool
+CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute()
{
- if (!target_)
- {
- PCL_ERROR ("[pcl::registration::%s::compute] No input target dataset was given!\n", getClassName ().c_str ());
+ if (!target_) {
+ PCL_ERROR("[pcl::registration::%s::compute] No input target dataset was given!\n",
+ getClassName().c_str());
return (false);
}
// Only update target kd-tree if a new target cloud was set
- if (target_cloud_updated_ && !force_no_recompute_)
- {
+ if (target_cloud_updated_ && !force_no_recompute_) {
// If the target indices have been given via setIndicesTarget
if (target_indices_)
- tree_->setInputCloud (target_, target_indices_);
+ tree_->setInputCloud(target_, target_indices_);
else
- tree_->setInputCloud (target_);
+ tree_->setInputCloud(target_);
target_cloud_updated_ = false;
}
- return (PCLBase<PointSource>::initCompute ());
+ return (PCLBase<PointSource>::initCompute());
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> bool
-CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initComputeReciprocal ()
+template <typename PointSource, typename PointTarget, typename Scalar>
+bool
+CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initComputeReciprocal()
{
// Only update source kd-tree if a new target cloud was set
- if (source_cloud_updated_ && !force_no_recompute_reciprocal_)
- {
+ if (source_cloud_updated_ && !force_no_recompute_reciprocal_) {
if (point_representation_)
- tree_reciprocal_->setPointRepresentation (point_representation_);
+ tree_reciprocal_->setPointRepresentation(point_representation_);
// If the target indices have been given via setIndicesTarget
if (indices_)
- tree_reciprocal_->setInputCloud (getInputSource(), getIndicesSource());
+ tree_reciprocal_->setInputCloud(getInputSource(), getIndicesSource());
else
- tree_reciprocal_->setInputCloud (getInputSource());
+ tree_reciprocal_->setInputCloud(getInputSource());
source_cloud_updated_ = false;
}
return (true);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-CorrespondenceEstimation<PointSource, PointTarget, Scalar>::determineCorrespondences (
- pcl::Correspondences &correspondences, double max_distance)
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+CorrespondenceEstimation<PointSource, PointTarget, Scalar>::determineCorrespondences(
+ pcl::Correspondences& correspondences, double max_distance)
{
- if (!initCompute ())
+ if (!initCompute())
return;
double max_dist_sqr = max_distance * max_distance;
- correspondences.resize (indices_->size ());
+ correspondences.resize(indices_->size());
- std::vector<int> index (1);
- std::vector<float> distance (1);
+ pcl::Indices index(1);
+ std::vector<float> distance(1);
pcl::Correspondence corr;
unsigned int nr_valid_correspondences = 0;
// Check if the template types are the same. If true, avoid a copy.
- // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT macro!
- if (isSamePointType<PointSource, PointTarget> ())
- {
+ // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT
+ // macro!
+ if (isSamePointType<PointSource, PointTarget>()) {
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
- {
- tree_->nearestKSearch ((*input_)[*idx], 1, index, distance);
+ for (const auto& idx : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx], 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
- corr.index_query = *idx;
+ corr.index_query = idx;
corr.index_match = index[0];
corr.distance = distance[0];
correspondences[nr_valid_correspondences++] = corr;
}
}
- else
- {
+ else {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
- {
- // Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint ((*input_)[*idx], pt);
+ for (const auto& idx : (*indices_)) {
+ // Copy the source data to a target PointTarget format so we can search in the
+ // tree
+ copyPoint((*input_)[idx], pt);
- tree_->nearestKSearch (pt, 1, index, distance);
+ tree_->nearestKSearch(pt, 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
- corr.index_query = *idx;
+ corr.index_query = idx;
corr.index_match = index[0];
corr.distance = distance[0];
correspondences[nr_valid_correspondences++] = corr;
}
}
- correspondences.resize (nr_valid_correspondences);
- deinitCompute ();
+ correspondences.resize(nr_valid_correspondences);
+ deinitCompute();
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-CorrespondenceEstimation<PointSource, PointTarget, Scalar>::determineReciprocalCorrespondences (
- pcl::Correspondences &correspondences, double max_distance)
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+CorrespondenceEstimation<PointSource, PointTarget, Scalar>::
+ determineReciprocalCorrespondences(pcl::Correspondences& correspondences,
+ double max_distance)
{
- if (!initCompute ())
+ if (!initCompute())
return;
  // setup tree for reciprocal search
  // Set the internal point representation of choice
-  if (!initComputeReciprocal ())
+  if (!initComputeReciprocal())
    return;
double max_dist_sqr = max_distance * max_distance;
- correspondences.resize (indices_->size());
- std::vector<int> index (1);
- std::vector<float> distance (1);
- std::vector<int> index_reciprocal (1);
- std::vector<float> distance_reciprocal (1);
+ correspondences.resize(indices_->size());
+ pcl::Indices index(1);
+ std::vector<float> distance(1);
+ pcl::Indices index_reciprocal(1);
+ std::vector<float> distance_reciprocal(1);
pcl::Correspondence corr;
unsigned int nr_valid_correspondences = 0;
int target_idx = 0;
// Check if the template types are the same. If true, avoid a copy.
- // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT macro!
- if (isSamePointType<PointSource, PointTarget> ())
- {
+ // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT
+ // macro!
+ if (isSamePointType<PointSource, PointTarget>()) {
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
- {
- tree_->nearestKSearch ((*input_)[*idx], 1, index, distance);
+ for (const auto& idx : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx], 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
target_idx = index[0];
- tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
- if (distance_reciprocal[0] > max_dist_sqr || *idx != index_reciprocal[0])
+ tree_reciprocal_->nearestKSearch(
+ (*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
+ if (distance_reciprocal[0] > max_dist_sqr || idx != index_reciprocal[0])
continue;
- corr.index_query = *idx;
+ corr.index_query = idx;
corr.index_match = index[0];
corr.distance = distance[0];
correspondences[nr_valid_correspondences++] = corr;
}
}
- else
- {
+ else {
PointTarget pt_src;
PointSource pt_tgt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
- {
- // Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint ((*input_)[*idx], pt_src);
+ for (const auto& idx : (*indices_)) {
+ // Copy the source data to a target PointTarget format so we can search in the
+ // tree
+ copyPoint((*input_)[idx], pt_src);
- tree_->nearestKSearch (pt_src, 1, index, distance);
+ tree_->nearestKSearch(pt_src, 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
target_idx = index[0];
- // Copy the target data to a target PointSource format so we can search in the tree_reciprocal
- copyPoint ((*target_)[target_idx], pt_tgt);
+ // Copy the target data to a target PointSource format so we can search in the
+ // tree_reciprocal
+ copyPoint((*target_)[target_idx], pt_tgt);
- tree_reciprocal_->nearestKSearch (pt_tgt, 1, index_reciprocal, distance_reciprocal);
- if (distance_reciprocal[0] > max_dist_sqr || *idx != index_reciprocal[0])
+ tree_reciprocal_->nearestKSearch(
+ pt_tgt, 1, index_reciprocal, distance_reciprocal);
+ if (distance_reciprocal[0] > max_dist_sqr || idx != index_reciprocal[0])
continue;
- corr.index_query = *idx;
+ corr.index_query = idx;
corr.index_match = index[0];
corr.distance = distance[0];
correspondences[nr_valid_correspondences++] = corr;
}
}
- correspondences.resize (nr_valid_correspondences);
- deinitCompute ();
+ correspondences.resize(nr_valid_correspondences);
+ deinitCompute();
}
} // namespace registration
} // namespace pcl
-//#define PCL_INSTANTIATE_CorrespondenceEstimation(T,U) template class PCL_EXPORTS pcl::registration::CorrespondenceEstimation<T,U>;
+//#define PCL_INSTANTIATE_CorrespondenceEstimation(T,U) template class PCL_EXPORTS
+// pcl::registration::CorrespondenceEstimation<T,U>;
#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_ESTIMATION_H_ */
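Besides the brace and whitespace changes, the hunks above migrate the nearest-neighbour index buffers from std::vector<int> to pcl::Indices (an alias for a vector of pcl::index_t, whose width and signedness are build-time options) and replace the iterator loops over indices_ with range-based for loops. A minimal sketch of driving this estimator, assuming src and tgt are already-filled PointCloud<PointXYZ>::ConstPtr clouds and a 5 cm search radius (both illustrative):

    #include <pcl/point_types.h>
    #include <pcl/registration/correspondence_estimation.h>

    pcl::registration::CorrespondenceEstimation<pcl::PointXYZ, pcl::PointXYZ> est;
    est.setInputSource(src);
    est.setInputTarget(tgt);
    pcl::Correspondences all, mutual;
    est.determineCorrespondences(all, 0.05);              // max_distance is linear; squared internally
    est.determineReciprocalCorrespondences(mutual, 0.05); // keeps only mutual nearest neighbours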
-
#include <pcl/common/copy_point.h>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> bool
-CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar>::initCompute ()
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+bool
+CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar>::
+ initCompute()
{
- if (!source_normals_ || !target_normals_)
- {
- PCL_WARN ("[pcl::registration::%s::initCompute] Datasets containing normals for source/target have not been given!\n", getClassName ().c_str ());
+ if (!source_normals_ || !target_normals_) {
+ PCL_WARN("[pcl::registration::%s::initCompute] Datasets containing normals for "
+ "source/target have not been given!\n",
+ getClassName().c_str());
return (false);
}
- return (CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute ());
+ return (
+ CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute());
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar>::determineCorrespondences (
- pcl::Correspondences &correspondences, double max_distance)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar>::
+ determineCorrespondences(pcl::Correspondences& correspondences, double max_distance)
{
- if (!initCompute ())
+ if (!initCompute())
return;
- correspondences.resize (indices_->size ());
+ correspondences.resize(indices_->size());
- std::vector<int> nn_indices (k_);
- std::vector<float> nn_dists (k_);
+ pcl::Indices nn_indices(k_);
+ std::vector<float> nn_dists(k_);
int min_index = 0;
  pcl::Correspondence corr;
  unsigned int nr_valid_correspondences = 0;
// Check if the template types are the same. If true, avoid a copy.
- // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT macro!
- if (isSamePointType<PointSource, PointTarget> ())
- {
+ // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT
+ // macro!
+ if (isSamePointType<PointSource, PointTarget>()) {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
- {
- tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
+ for (const auto& idx_i : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx_i], k_, nn_indices, nn_dists);
- // Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
- float min_dist = std::numeric_limits<float>::max ();
+ // Among the K nearest neighbours find the one with minimum perpendicular distance
+ // to the normal
+ float min_dist = std::numeric_limits<float>::max();
// Find the best correspondence
- for (std::size_t j = 0; j < nn_indices.size (); j++)
- {
- float cos_angle = (*source_normals_)[*idx_i].normal_x * (*target_normals_)[nn_indices[j]].normal_x +
- (*source_normals_)[*idx_i].normal_y * (*target_normals_)[nn_indices[j]].normal_y +
- (*source_normals_)[*idx_i].normal_z * (*target_normals_)[nn_indices[j]].normal_z ;
+ for (std::size_t j = 0; j < nn_indices.size(); j++) {
+ float cos_angle = (*source_normals_)[idx_i].normal_x *
+ (*target_normals_)[nn_indices[j]].normal_x +
+ (*source_normals_)[idx_i].normal_y *
+ (*target_normals_)[nn_indices[j]].normal_y +
+ (*source_normals_)[idx_i].normal_z *
+ (*target_normals_)[nn_indices[j]].normal_z;
float dist = nn_dists[j] * (2.0f - cos_angle * cos_angle);
- if (dist < min_dist)
- {
+ if (dist < min_dist) {
min_dist = dist;
- min_index = static_cast<int> (j);
+ min_index = static_cast<int>(j);
}
}
if (min_dist > max_distance)
continue;
- corr.index_query = *idx_i;
+ corr.index_query = idx_i;
corr.index_match = nn_indices[min_index];
- corr.distance = nn_dists[min_index];//min_dist;
+ corr.distance = nn_dists[min_index]; // min_dist;
correspondences[nr_valid_correspondences++] = corr;
}
}
- else
- {
+ else {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
- {
- tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
+ for (const auto& idx_i : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx_i], k_, nn_indices, nn_dists);
- // Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
- float min_dist = std::numeric_limits<float>::max ();
+ // Among the K nearest neighbours find the one with minimum perpendicular distance
+ // to the normal
+ float min_dist = std::numeric_limits<float>::max();
// Find the best correspondence
- for (std::size_t j = 0; j < nn_indices.size (); j++)
- {
+ for (std::size_t j = 0; j < nn_indices.size(); j++) {
PointSource pt_src;
- // Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint ((*input_)[*idx_i], pt_src);
-
- float cos_angle = (*source_normals_)[*idx_i].normal_x * (*target_normals_)[nn_indices[j]].normal_x +
- (*source_normals_)[*idx_i].normal_y * (*target_normals_)[nn_indices[j]].normal_y +
- (*source_normals_)[*idx_i].normal_z * (*target_normals_)[nn_indices[j]].normal_z ;
+ // Copy the source data to a target PointTarget format so we can search in the
+ // tree
+ copyPoint((*input_)[idx_i], pt_src);
+
+ float cos_angle = (*source_normals_)[idx_i].normal_x *
+ (*target_normals_)[nn_indices[j]].normal_x +
+ (*source_normals_)[idx_i].normal_y *
+ (*target_normals_)[nn_indices[j]].normal_y +
+ (*source_normals_)[idx_i].normal_z *
+ (*target_normals_)[nn_indices[j]].normal_z;
float dist = nn_dists[j] * (2.0f - cos_angle * cos_angle);
- if (dist < min_dist)
- {
+ if (dist < min_dist) {
min_dist = dist;
- min_index = static_cast<int> (j);
+ min_index = static_cast<int>(j);
}
}
if (min_dist > max_distance)
continue;
- corr.index_query = *idx_i;
+ corr.index_query = idx_i;
corr.index_match = nn_indices[min_index];
- corr.distance = nn_dists[min_index];//min_dist;
+ corr.distance = nn_dists[min_index]; // min_dist;
correspondences[nr_valid_correspondences++] = corr;
}
}
- correspondences.resize (nr_valid_correspondences);
- deinitCompute ();
+ correspondences.resize(nr_valid_correspondences);
+ deinitCompute();
}
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar>::determineReciprocalCorrespondences (
- pcl::Correspondences &correspondences, double max_distance)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+CorrespondenceEstimationBackProjection<PointSource, PointTarget, NormalT, Scalar>::
+ determineReciprocalCorrespondences(pcl::Correspondences& correspondences,
+ double max_distance)
{
- if (!initCompute ())
+ if (!initCompute())
return;
// Set the internal point representation of choice
- if(!initComputeReciprocal())
+ if (!initComputeReciprocal())
return;
- correspondences.resize (indices_->size ());
+ correspondences.resize(indices_->size());
- std::vector<int> nn_indices (k_);
- std::vector<float> nn_dists (k_);
- std::vector<int> index_reciprocal (1);
- std::vector<float> distance_reciprocal (1);
+ pcl::Indices nn_indices(k_);
+ std::vector<float> nn_dists(k_);
+ pcl::Indices index_reciprocal(1);
+ std::vector<float> distance_reciprocal(1);
int min_index = 0;
  pcl::Correspondence corr;
  unsigned int nr_valid_correspondences = 0;
  int target_idx = 0;
// Check if the template types are the same. If true, avoid a copy.
- // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT macro!
- if (isSamePointType<PointSource, PointTarget> ())
- {
+ // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT
+ // macro!
+ if (isSamePointType<PointSource, PointTarget>()) {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
- {
- tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
+ for (const auto& idx_i : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx_i], k_, nn_indices, nn_dists);
- // Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
- float min_dist = std::numeric_limits<float>::max ();
+ // Among the K nearest neighbours find the one with minimum perpendicular distance
+ // to the normal
+ float min_dist = std::numeric_limits<float>::max();
// Find the best correspondence
- for (std::size_t j = 0; j < nn_indices.size (); j++)
- {
- float cos_angle = (*source_normals_)[*idx_i].normal_x * (*target_normals_)[nn_indices[j]].normal_x +
- (*source_normals_)[*idx_i].normal_y * (*target_normals_)[nn_indices[j]].normal_y +
- (*source_normals_)[*idx_i].normal_z * (*target_normals_)[nn_indices[j]].normal_z ;
+ for (std::size_t j = 0; j < nn_indices.size(); j++) {
+ float cos_angle = (*source_normals_)[idx_i].normal_x *
+ (*target_normals_)[nn_indices[j]].normal_x +
+ (*source_normals_)[idx_i].normal_y *
+ (*target_normals_)[nn_indices[j]].normal_y +
+ (*source_normals_)[idx_i].normal_z *
+ (*target_normals_)[nn_indices[j]].normal_z;
float dist = nn_dists[j] * (2.0f - cos_angle * cos_angle);
- if (dist < min_dist)
- {
+ if (dist < min_dist) {
min_dist = dist;
- min_index = static_cast<int> (j);
+ min_index = static_cast<int>(j);
}
}
      if (min_dist > max_distance)
        continue;
// Check if the correspondence is reciprocal
target_idx = nn_indices[min_index];
- tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch(
+ (*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
- if (*idx_i != index_reciprocal[0])
+ if (idx_i != index_reciprocal[0])
continue;
- corr.index_query = *idx_i;
+ corr.index_query = idx_i;
corr.index_match = nn_indices[min_index];
- corr.distance = nn_dists[min_index];//min_dist;
+ corr.distance = nn_dists[min_index]; // min_dist;
correspondences[nr_valid_correspondences++] = corr;
}
}
- else
- {
+ else {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
- {
- tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
+ for (const auto& idx_i : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx_i], k_, nn_indices, nn_dists);
- // Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
- float min_dist = std::numeric_limits<float>::max ();
+ // Among the K nearest neighbours find the one with minimum perpendicular distance
+ // to the normal
+ float min_dist = std::numeric_limits<float>::max();
// Find the best correspondence
- for (std::size_t j = 0; j < nn_indices.size (); j++)
- {
+ for (std::size_t j = 0; j < nn_indices.size(); j++) {
PointSource pt_src;
- // Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint ((*input_)[*idx_i], pt_src);
-
- float cos_angle = (*source_normals_)[*idx_i].normal_x * (*target_normals_)[nn_indices[j]].normal_x +
- (*source_normals_)[*idx_i].normal_y * (*target_normals_)[nn_indices[j]].normal_y +
- (*source_normals_)[*idx_i].normal_z * (*target_normals_)[nn_indices[j]].normal_z ;
+ // Copy the source data to a target PointTarget format so we can search in the
+ // tree
+ copyPoint((*input_)[idx_i], pt_src);
+
+ float cos_angle = (*source_normals_)[idx_i].normal_x *
+ (*target_normals_)[nn_indices[j]].normal_x +
+ (*source_normals_)[idx_i].normal_y *
+ (*target_normals_)[nn_indices[j]].normal_y +
+ (*source_normals_)[idx_i].normal_z *
+ (*target_normals_)[nn_indices[j]].normal_z;
float dist = nn_dists[j] * (2.0f - cos_angle * cos_angle);
- if (dist < min_dist)
- {
+ if (dist < min_dist) {
min_dist = dist;
- min_index = static_cast<int> (j);
+ min_index = static_cast<int>(j);
}
}
      if (min_dist > max_distance)
        continue;
// Check if the correspondence is reciprocal
target_idx = nn_indices[min_index];
- tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch(
+ (*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
- if (*idx_i != index_reciprocal[0])
+ if (idx_i != index_reciprocal[0])
continue;
- corr.index_query = *idx_i;
+ corr.index_query = idx_i;
corr.index_match = nn_indices[min_index];
- corr.distance = nn_dists[min_index];//min_dist;
+ corr.distance = nn_dists[min_index]; // min_dist;
correspondences[nr_valid_correspondences++] = corr;
}
}
- correspondences.resize (nr_valid_correspondences);
- deinitCompute ();
+ correspondences.resize(nr_valid_correspondences);
+ deinitCompute();
}
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_ESTIMATION_BACK_PROJECTION_HPP_
+#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_ESTIMATION_BACK_PROJECTION_HPP_
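The neighbour selection in both branches above can be stated compactly. Assuming nn_dists holds the squared Euclidean distances returned by the kd-tree and the normals are unit length, each of the k candidates is scored as

    \[ d_j = \lVert \mathbf{p}_s - \mathbf{p}_{t_j} \rVert^2 \left( 2 - (\mathbf{n}_s \cdot \mathbf{n}_{t_j})^2 \right) \]

so the raw squared distance is left unchanged when the source and target normals are (anti)parallel and doubled when they are orthogonal; the candidate with the smallest score wins, and the pair is skipped if that score exceeds max_distance.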
#include <pcl/common/copy_point.h>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> bool
-CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar>::initCompute ()
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+bool
+CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar>::
+ initCompute()
{
- if (!source_normals_)
- {
- PCL_WARN ("[pcl::registration::%s::initCompute] Datasets containing normals for source have not been given!\n", getClassName ().c_str ());
+ if (!source_normals_) {
+ PCL_WARN("[pcl::registration::%s::initCompute] Datasets containing normals for "
+ "source have not been given!\n",
+ getClassName().c_str());
return (false);
}
- return (CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute ());
+ return (
+ CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute());
}
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar>::determineCorrespondences (
- pcl::Correspondences &correspondences, double max_distance)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar>::
+ determineCorrespondences(pcl::Correspondences& correspondences, double max_distance)
{
- if (!initCompute ())
+ if (!initCompute())
return;
- correspondences.resize (indices_->size ());
+ correspondences.resize(indices_->size());
- std::vector<int> nn_indices (k_);
- std::vector<float> nn_dists (k_);
+ pcl::Indices nn_indices(k_);
+ std::vector<float> nn_dists(k_);
int min_index = 0;
  pcl::Correspondence corr;
  unsigned int nr_valid_correspondences = 0;
// Check if the template types are the same. If true, avoid a copy.
- // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT macro!
- if (isSamePointType<PointSource, PointTarget> ())
- {
+ // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT
+ // macro!
+ if (isSamePointType<PointSource, PointTarget>()) {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
- {
- tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
+ for (const auto& idx_i : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx_i], k_, nn_indices, nn_dists);
- // Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
- double min_dist = std::numeric_limits<double>::max ();
+ // Among the K nearest neighbours find the one with minimum perpendicular distance
+ // to the normal
+ double min_dist = std::numeric_limits<double>::max();
// Find the best correspondence
- for (std::size_t j = 0; j < nn_indices.size (); j++)
- {
+ for (std::size_t j = 0; j < nn_indices.size(); j++) {
// computing the distance between a point and a line in 3d.
// Reference - http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
- pt.x = (*target_)[nn_indices[j]].x - (*input_)[*idx_i].x;
- pt.y = (*target_)[nn_indices[j]].y - (*input_)[*idx_i].y;
- pt.z = (*target_)[nn_indices[j]].z - (*input_)[*idx_i].z;
+ pt.x = (*target_)[nn_indices[j]].x - (*input_)[idx_i].x;
+ pt.y = (*target_)[nn_indices[j]].y - (*input_)[idx_i].y;
+ pt.z = (*target_)[nn_indices[j]].z - (*input_)[idx_i].z;
- const NormalT &normal = (*source_normals_)[*idx_i];
- Eigen::Vector3d N (normal.normal_x, normal.normal_y, normal.normal_z);
- Eigen::Vector3d V (pt.x, pt.y, pt.z);
- Eigen::Vector3d C = N.cross (V);
+ const NormalT& normal = (*source_normals_)[idx_i];
+ Eigen::Vector3d N(normal.normal_x, normal.normal_y, normal.normal_z);
+ Eigen::Vector3d V(pt.x, pt.y, pt.z);
+ Eigen::Vector3d C = N.cross(V);
// Check if we have a better correspondence
- double dist = C.dot (C);
- if (dist < min_dist)
- {
+ double dist = C.dot(C);
+ if (dist < min_dist) {
min_dist = dist;
- min_index = static_cast<int> (j);
+ min_index = static_cast<int>(j);
}
}
if (min_dist > max_distance)
continue;
- corr.index_query = *idx_i;
+ corr.index_query = idx_i;
corr.index_match = nn_indices[min_index];
- corr.distance = nn_dists[min_index];//min_dist;
+ corr.distance = nn_dists[min_index]; // min_dist;
correspondences[nr_valid_correspondences++] = corr;
}
}
- else
- {
+ else {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
- {
- tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
+ for (const auto& idx_i : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx_i], k_, nn_indices, nn_dists);
- // Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
- double min_dist = std::numeric_limits<double>::max ();
+ // Among the K nearest neighbours find the one with minimum perpendicular distance
+ // to the normal
+ double min_dist = std::numeric_limits<double>::max();
// Find the best correspondence
- for (std::size_t j = 0; j < nn_indices.size (); j++)
- {
+ for (std::size_t j = 0; j < nn_indices.size(); j++) {
PointSource pt_src;
- // Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint ((*input_)[*idx_i], pt_src);
+ // Copy the source data to a target PointTarget format so we can search in the
+ // tree
+ copyPoint((*input_)[idx_i], pt_src);
- // computing the distance between a point and a line in 3d.
+ // computing the distance between a point and a line in 3d.
// Reference - http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
pt.x = (*target_)[nn_indices[j]].x - pt_src.x;
pt.y = (*target_)[nn_indices[j]].y - pt_src.y;
pt.z = (*target_)[nn_indices[j]].z - pt_src.z;
- const NormalT &normal = (*source_normals_)[*idx_i];
- Eigen::Vector3d N (normal.normal_x, normal.normal_y, normal.normal_z);
- Eigen::Vector3d V (pt.x, pt.y, pt.z);
- Eigen::Vector3d C = N.cross (V);
+ const NormalT& normal = (*source_normals_)[idx_i];
+ Eigen::Vector3d N(normal.normal_x, normal.normal_y, normal.normal_z);
+ Eigen::Vector3d V(pt.x, pt.y, pt.z);
+ Eigen::Vector3d C = N.cross(V);
// Check if we have a better correspondence
- double dist = C.dot (C);
- if (dist < min_dist)
- {
+ double dist = C.dot(C);
+ if (dist < min_dist) {
min_dist = dist;
- min_index = static_cast<int> (j);
+ min_index = static_cast<int>(j);
}
}
if (min_dist > max_distance)
continue;
- corr.index_query = *idx_i;
+ corr.index_query = idx_i;
corr.index_match = nn_indices[min_index];
- corr.distance = nn_dists[min_index];//min_dist;
+ corr.distance = nn_dists[min_index]; // min_dist;
correspondences[nr_valid_correspondences++] = corr;
}
}
- correspondences.resize (nr_valid_correspondences);
- deinitCompute ();
+ correspondences.resize(nr_valid_correspondences);
+ deinitCompute();
}
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar>::determineReciprocalCorrespondences (
- pcl::Correspondences &correspondences, double max_distance)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+CorrespondenceEstimationNormalShooting<PointSource, PointTarget, NormalT, Scalar>::
+ determineReciprocalCorrespondences(pcl::Correspondences& correspondences,
+ double max_distance)
{
- if (!initCompute ())
+ if (!initCompute())
return;
// setup tree for reciprocal search
// Set the internal point representation of choice
- if (!initComputeReciprocal ())
+ if (!initComputeReciprocal())
return;
- correspondences.resize (indices_->size ());
+ correspondences.resize(indices_->size());
- std::vector<int> nn_indices (k_);
- std::vector<float> nn_dists (k_);
- std::vector<int> index_reciprocal (1);
- std::vector<float> distance_reciprocal (1);
+ pcl::Indices nn_indices(k_);
+ std::vector<float> nn_dists(k_);
+ pcl::Indices index_reciprocal(1);
+ std::vector<float> distance_reciprocal(1);
int min_index = 0;
  pcl::Correspondence corr;
  unsigned int nr_valid_correspondences = 0;
  int target_idx = 0;
// Check if the template types are the same. If true, avoid a copy.
- // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT macro!
- if (isSamePointType<PointSource, PointTarget> ())
- {
+ // Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT
+ // macro!
+ if (isSamePointType<PointSource, PointTarget>()) {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
- {
- tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
+ for (const auto& idx_i : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx_i], k_, nn_indices, nn_dists);
- // Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
- double min_dist = std::numeric_limits<double>::max ();
+ // Among the K nearest neighbours find the one with minimum perpendicular distance
+ // to the normal
+ double min_dist = std::numeric_limits<double>::max();
// Find the best correspondence
- for (std::size_t j = 0; j < nn_indices.size (); j++)
- {
+ for (std::size_t j = 0; j < nn_indices.size(); j++) {
// computing the distance between a point and a line in 3d.
// Reference - http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
- pt.x = (*target_)[nn_indices[j]].x - (*input_)[*idx_i].x;
- pt.y = (*target_)[nn_indices[j]].y - (*input_)[*idx_i].y;
- pt.z = (*target_)[nn_indices[j]].z - (*input_)[*idx_i].z;
+ pt.x = (*target_)[nn_indices[j]].x - (*input_)[idx_i].x;
+ pt.y = (*target_)[nn_indices[j]].y - (*input_)[idx_i].y;
+ pt.z = (*target_)[nn_indices[j]].z - (*input_)[idx_i].z;
- const NormalT &normal = (*source_normals_)[*idx_i];
- Eigen::Vector3d N (normal.normal_x, normal.normal_y, normal.normal_z);
- Eigen::Vector3d V (pt.x, pt.y, pt.z);
- Eigen::Vector3d C = N.cross (V);
+ const NormalT& normal = (*source_normals_)[idx_i];
+ Eigen::Vector3d N(normal.normal_x, normal.normal_y, normal.normal_z);
+ Eigen::Vector3d V(pt.x, pt.y, pt.z);
+ Eigen::Vector3d C = N.cross(V);
// Check if we have a better correspondence
- double dist = C.dot (C);
- if (dist < min_dist)
- {
+ double dist = C.dot(C);
+ if (dist < min_dist) {
min_dist = dist;
- min_index = static_cast<int> (j);
+ min_index = static_cast<int>(j);
}
}
      if (min_dist > max_distance)
        continue;
// Check if the correspondence is reciprocal
target_idx = nn_indices[min_index];
- tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch(
+ (*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
- if (*idx_i != index_reciprocal[0])
+ if (idx_i != index_reciprocal[0])
continue;
// Correspondence IS reciprocal, save it and continue
- corr.index_query = *idx_i;
+ corr.index_query = idx_i;
corr.index_match = nn_indices[min_index];
- corr.distance = nn_dists[min_index];//min_dist;
+ corr.distance = nn_dists[min_index]; // min_dist;
correspondences[nr_valid_correspondences++] = corr;
}
}
- else
- {
+ else {
PointTarget pt;
// Iterate over the input set of source indices
- for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
- {
- tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
+ for (const auto& idx_i : (*indices_)) {
+ tree_->nearestKSearch((*input_)[idx_i], k_, nn_indices, nn_dists);
- // Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
- double min_dist = std::numeric_limits<double>::max ();
+ // Among the K nearest neighbours find the one with minimum perpendicular distance
+ // to the normal
+ double min_dist = std::numeric_limits<double>::max();
// Find the best correspondence
- for (std::size_t j = 0; j < nn_indices.size (); j++)
- {
+ for (std::size_t j = 0; j < nn_indices.size(); j++) {
PointSource pt_src;
- // Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint ((*input_)[*idx_i], pt_src);
+ // Copy the source data to a target PointTarget format so we can search in the
+ // tree
+ copyPoint((*input_)[idx_i], pt_src);
// computing the distance between a point and a line in 3d.
// Reference - http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
        pt.x = (*target_)[nn_indices[j]].x - pt_src.x;
        pt.y = (*target_)[nn_indices[j]].y - pt_src.y;
pt.z = (*target_)[nn_indices[j]].z - pt_src.z;
- const NormalT &normal = (*source_normals_)[*idx_i];
- Eigen::Vector3d N (normal.normal_x, normal.normal_y, normal.normal_z);
- Eigen::Vector3d V (pt.x, pt.y, pt.z);
- Eigen::Vector3d C = N.cross (V);
+ const NormalT& normal = (*source_normals_)[idx_i];
+ Eigen::Vector3d N(normal.normal_x, normal.normal_y, normal.normal_z);
+ Eigen::Vector3d V(pt.x, pt.y, pt.z);
+ Eigen::Vector3d C = N.cross(V);
// Check if we have a better correspondence
- double dist = C.dot (C);
- if (dist < min_dist)
- {
+ double dist = C.dot(C);
+ if (dist < min_dist) {
min_dist = dist;
- min_index = static_cast<int> (j);
+ min_index = static_cast<int>(j);
}
}
      if (min_dist > max_distance)
        continue;
// Check if the correspondence is reciprocal
target_idx = nn_indices[min_index];
- tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch(
+ (*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
- if (*idx_i != index_reciprocal[0])
+ if (idx_i != index_reciprocal[0])
continue;
// Correspondence IS reciprocal, save it and continue
- corr.index_query = *idx_i;
+ corr.index_query = idx_i;
corr.index_match = nn_indices[min_index];
- corr.distance = nn_dists[min_index];//min_dist;
+ corr.distance = nn_dists[min_index]; // min_dist;
correspondences[nr_valid_correspondences++] = corr;
}
}
- correspondences.resize (nr_valid_correspondences);
- deinitCompute ();
+ correspondences.resize(nr_valid_correspondences);
+ deinitCompute();
}
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_ESTIMATION_NORMAL_SHOOTING_H_
+#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_ESTIMATION_NORMAL_SHOOTING_H_
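The scoring above is the squared point-to-line distance from the MathWorld reference: with displacement v = p_t - p_s and source normal n, the full expression is

    \[ d^2 = \frac{\lVert \mathbf{n} \times \mathbf{v} \rVert^2}{\lVert \mathbf{n} \rVert^2}, \]

and the code keeps only the numerator, i.e. it implicitly assumes unit-length normals. Note also that the reported corr.distance is the plain nearest-neighbour distance nn_dists[min_index], not this line distance; the trailing // min_dist comment marks the alternative.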
-
#ifndef PCL_REGISTRATION_CORRESPONDENCE_ESTIMATION_ORGANIZED_PROJECTION_IMPL_HPP_
#define PCL_REGISTRATION_CORRESPONDENCE_ESTIMATION_ORGANIZED_PROJECTION_IMPL_HPP_
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> bool
-CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar>::initCompute ()
+template <typename PointSource, typename PointTarget, typename Scalar>
+bool
+CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar>::
+ initCompute()
{
- // Set the target_cloud_updated_ variable to true, so that the kd-tree is not built - it is not needed for this class
+ // Set the target_cloud_updated_ variable to true, so that the kd-tree is not built -
+ // it is not needed for this class
target_cloud_updated_ = false;
- if (!CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute ())
+ if (!CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::initCompute())
return (false);
/// Check if the target cloud is organized
- if (!target_->isOrganized ())
- {
- PCL_WARN ("[pcl::registration::%s::initCompute] Target cloud is not organized.\n", getClassName ().c_str ());
+ if (!target_->isOrganized()) {
+ PCL_WARN("[pcl::registration::%s::initCompute] Target cloud is not organized.\n",
+ getClassName().c_str());
return (false);
}
/// Put the projection matrix together
- projection_matrix_ (0, 0) = fx_;
- projection_matrix_ (1, 1) = fy_;
- projection_matrix_ (0, 2) = cx_;
- projection_matrix_ (1, 2) = cy_;
+ projection_matrix_(0, 0) = fx_;
+ projection_matrix_(1, 1) = fy_;
+ projection_matrix_(0, 2) = cx_;
+ projection_matrix_(1, 2) = cy_;
return (true);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar>::determineCorrespondences (
- pcl::Correspondences &correspondences,
- double max_distance)
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar>::
+ determineCorrespondences(pcl::Correspondences& correspondences, double max_distance)
{
- if (!initCompute ())
+ if (!initCompute())
return;
- correspondences.resize (indices_->size ());
+ correspondences.resize(indices_->size());
std::size_t c_index = 0;
- for (std::vector<int>::const_iterator src_it = indices_->begin (); src_it != indices_->end (); ++src_it)
- {
- if (isFinite ((*input_)[*src_it]))
- {
- Eigen::Vector4f p_src (src_to_tgt_transformation_ * (*input_)[*src_it].getVector4fMap ());
- Eigen::Vector3f p_src3 (p_src[0], p_src[1], p_src[2]);
- Eigen::Vector3f uv (projection_matrix_ * p_src3);
+ for (const auto& src_idx : (*indices_)) {
+ if (isFinite((*input_)[src_idx])) {
+ Eigen::Vector4f p_src(src_to_tgt_transformation_ *
+ (*input_)[src_idx].getVector4fMap());
+ Eigen::Vector3f p_src3(p_src[0], p_src[1], p_src[2]);
+ Eigen::Vector3f uv(projection_matrix_ * p_src3);
/// Check if the point was behind the camera
if (uv[2] <= 0)
continue;
- int u = static_cast<int> (uv[0] / uv[2]);
- int v = static_cast<int> (uv[1] / uv[2]);
+ int u = static_cast<int>(uv[0] / uv[2]);
+ int v = static_cast<int>(uv[1] / uv[2]);
- if (u >= 0 && u < static_cast<int> (target_->width) &&
- v >= 0 && v < static_cast<int> (target_->height))
- {
- const PointTarget &pt_tgt = target_->at (u, v);
- if (!isFinite (pt_tgt))
+ if (u >= 0 && u < static_cast<int>(target_->width) && v >= 0 &&
+ v < static_cast<int>(target_->height)) {
+ const PointTarget& pt_tgt = target_->at(u, v);
+ if (!isFinite(pt_tgt))
continue;
/// Check if the depth difference is larger than the threshold
- if (std::abs (uv[2] - pt_tgt.z) > depth_threshold_)
+ if (std::abs(uv[2] - pt_tgt.z) > depth_threshold_)
continue;
- double dist = (p_src3 - pt_tgt.getVector3fMap ()).norm ();
+ double dist = (p_src3 - pt_tgt.getVector3fMap()).norm();
if (dist < max_distance)
- correspondences[c_index++] = pcl::Correspondence (*src_it, v * target_->width + u, static_cast<float> (dist));
+ correspondences[c_index++] = pcl::Correspondence(
+ src_idx, v * target_->width + u, static_cast<float>(dist));
}
}
}
- correspondences.resize (c_index);
+ correspondences.resize(c_index);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar>::determineReciprocalCorrespondences (
- pcl::Correspondences &correspondences,
- double max_distance)
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+CorrespondenceEstimationOrganizedProjection<PointSource, PointTarget, Scalar>::
+ determineReciprocalCorrespondences(pcl::Correspondences& correspondences,
+ double max_distance)
{
- // Call the normal determineCorrespondences (...), as doing it both ways will not improve the results
- determineCorrespondences (correspondences, max_distance);
+ // Call the normal determineCorrespondences (...), as doing it both ways will not
+ // improve the results
+ determineCorrespondences(correspondences, max_distance);
}
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_CORRESPONDENCE_ESTIMATION_ORGANIZED_PROJECTION_IMPL_HPP_
-
+#endif // PCL_REGISTRATION_CORRESPONDENCE_ESTIMATION_ORGANIZED_PROJECTION_IMPL_HPP_
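The arithmetic above is the standard pinhole projection. Assuming projection_matrix_ is initialised to identity elsewhere in the class (so its last row leaves z untouched), a source point (x, y, z) expressed in the target camera frame lands at the pixel

    \[ u = \frac{f_x\, x}{z} + c_x, \qquad v = \frac{f_y\, y}{z} + c_y, \]

and a correspondence is produced only when z > 0, (u, v) falls inside the organized target cloud, the depth gap |z - z_tgt(u, v)| stays below depth_threshold_, and the 3-D distance is below max_distance.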
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_DISTANCE_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_DISTANCE_HPP_
-
-
+PCL_DEPRECATED_HEADER(1, 15, "")
#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_DISTANCE_HPP_ */
-
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_FEATURES_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_FEATURES_HPP_
#include <boost/pointer_cast.hpp> // for static_pointer_cast
-namespace pcl
-{
+namespace pcl {
-namespace registration
-{
+namespace registration {
-template <typename FeatureT> inline void
-CorrespondenceRejectorFeatures::setSourceFeature (
- const typename pcl::PointCloud<FeatureT>::ConstPtr &source_feature, const std::string &key)
+template <typename FeatureT>
+inline void
+CorrespondenceRejectorFeatures::setSourceFeature(
+ const typename pcl::PointCloud<FeatureT>::ConstPtr& source_feature,
+ const std::string& key)
{
- if (features_map_.count (key) == 0)
- features_map_[key].reset (new FeatureContainer<FeatureT>);
- boost::static_pointer_cast<FeatureContainer<FeatureT> > (features_map_[key])->setSourceFeature (source_feature);
+ if (features_map_.count(key) == 0)
+ features_map_[key].reset(new FeatureContainer<FeatureT>);
+ boost::static_pointer_cast<FeatureContainer<FeatureT>>(features_map_[key])
+ ->setSourceFeature(source_feature);
}
-
-template <typename FeatureT> inline typename pcl::PointCloud<FeatureT>::ConstPtr
-CorrespondenceRejectorFeatures::getSourceFeature (const std::string &key)
+template <typename FeatureT>
+inline typename pcl::PointCloud<FeatureT>::ConstPtr
+CorrespondenceRejectorFeatures::getSourceFeature(const std::string& key)
{
- if (features_map_.count (key) == 0)
+ if (features_map_.count(key) == 0)
return (nullptr);
- return (boost::static_pointer_cast<FeatureContainer<FeatureT> > (features_map_[key])->getSourceFeature ());
+ return (boost::static_pointer_cast<FeatureContainer<FeatureT>>(features_map_[key])
+ ->getSourceFeature());
}
-
-template <typename FeatureT> inline void
-CorrespondenceRejectorFeatures::setTargetFeature (
- const typename pcl::PointCloud<FeatureT>::ConstPtr &target_feature, const std::string &key)
+template <typename FeatureT>
+inline void
+CorrespondenceRejectorFeatures::setTargetFeature(
+ const typename pcl::PointCloud<FeatureT>::ConstPtr& target_feature,
+ const std::string& key)
{
- if (features_map_.count (key) == 0)
- features_map_[key].reset (new FeatureContainer<FeatureT>);
- boost::static_pointer_cast<FeatureContainer<FeatureT> > (features_map_[key])->setTargetFeature (target_feature);
+ if (features_map_.count(key) == 0)
+ features_map_[key].reset(new FeatureContainer<FeatureT>);
+ boost::static_pointer_cast<FeatureContainer<FeatureT>>(features_map_[key])
+ ->setTargetFeature(target_feature);
}
-
-template <typename FeatureT> inline typename pcl::PointCloud<FeatureT>::ConstPtr
-CorrespondenceRejectorFeatures::getTargetFeature (const std::string &key)
+template <typename FeatureT>
+inline typename pcl::PointCloud<FeatureT>::ConstPtr
+CorrespondenceRejectorFeatures::getTargetFeature(const std::string& key)
{
- if (features_map_.count (key) == 0)
+ if (features_map_.count(key) == 0)
return (nullptr);
- return (boost::static_pointer_cast<FeatureContainer<FeatureT> > (features_map_[key])->getTargetFeature ());
+ return (boost::static_pointer_cast<FeatureContainer<FeatureT>>(features_map_[key])
+ ->getTargetFeature());
}
-
-template <typename FeatureT> inline void
-CorrespondenceRejectorFeatures::setDistanceThreshold (
- double thresh, const std::string &key)
+template <typename FeatureT>
+inline void
+CorrespondenceRejectorFeatures::setDistanceThreshold(double thresh,
+ const std::string& key)
{
- if (features_map_.count (key) == 0)
- features_map_[key].reset (new FeatureContainer<FeatureT>);
- boost::static_pointer_cast<FeatureContainer<FeatureT> > (features_map_[key])->setDistanceThreshold (thresh);
+ if (features_map_.count(key) == 0)
+ features_map_[key].reset(new FeatureContainer<FeatureT>);
+ boost::static_pointer_cast<FeatureContainer<FeatureT>>(features_map_[key])
+ ->setDistanceThreshold(thresh);
}
-
-template <typename FeatureT> inline void
-CorrespondenceRejectorFeatures::setFeatureRepresentation (
- const typename pcl::PointRepresentation<FeatureT>::ConstPtr &fr,
- const std::string &key)
+template <typename FeatureT>
+inline void
+CorrespondenceRejectorFeatures::setFeatureRepresentation(
+ const typename pcl::PointRepresentation<FeatureT>::ConstPtr& fr,
+ const std::string& key)
{
- if (features_map_.count (key) == 0)
- features_map_[key].reset (new FeatureContainer<FeatureT>);
- boost::static_pointer_cast<FeatureContainer<FeatureT> > (features_map_[key])->setFeatureRepresentation (fr);
+ if (features_map_.count(key) == 0)
+ features_map_[key].reset(new FeatureContainer<FeatureT>);
+ boost::static_pointer_cast<FeatureContainer<FeatureT>>(features_map_[key])
+ ->setFeatureRepresentation(fr);
}
} // namespace registration
} // namespace pcl
#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_FEATURES_HPP_ */
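The helpers above only populate a string-keyed map of typed FeatureContainer objects; the rejection itself then compares the stored source and target features for each correspondence. A minimal sketch, assuming FPFH descriptors were computed beforehand; the cloud names, the key, the threshold value, and the input correspondence set are all illustrative:

    #include <pcl/point_types.h>
    #include <pcl/registration/correspondence_rejection_features.h>

    pcl::registration::CorrespondenceRejectorFeatures rej;
    rej.setSourceFeature<pcl::FPFHSignature33>(src_fpfh, "fpfh"); // PointCloud<FPFHSignature33>::ConstPtr, assumed
    rej.setTargetFeature<pcl::FPFHSignature33>(tgt_fpfh, "fpfh");
    rej.setDistanceThreshold<pcl::FPFHSignature33>(50.0, "fpfh"); // illustrative threshold
    pcl::Correspondences kept;
    rej.getRemainingCorrespondences(all_correspondences, kept);   // all_correspondences assumed to exist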
-
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_MEDIAN_DISTANCE_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_MEDIAN_DISTANCE_HPP_
-
-
-#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_MEDIAN_DISTANCE_HPP_
+PCL_DEPRECATED_HEADER(1, 15, "")
+#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_MEDIAN_DISTANCE_HPP_
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ONE_TO_ONE_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ONE_TO_ONE_HPP_
-
+PCL_DEPRECATED_HEADER(1, 15, "")
#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ONE_TO_ONE_HPP_ */
-
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ORGANIZED_BOUNDARY_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ORGANIZED_BOUNDARY_HPP_
-
+PCL_DEPRECATED_HEADER(1, 15, "")
#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ORGANIZED_BOUNDARY_HPP_ */
-
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_POLY_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_POLY_HPP_
+namespace pcl {
-namespace pcl
-{
+namespace registration {
-namespace registration
-{
-
-template <typename SourceT, typename TargetT> void
-CorrespondenceRejectorPoly<SourceT, TargetT>::getRemainingCorrespondences (
+template <typename SourceT, typename TargetT>
+void
+CorrespondenceRejectorPoly<SourceT, TargetT>::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
remaining_correspondences = original_correspondences;
// Check source/target
- if (!input_)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] No source was input! Returning all input correspondences.\n",
- getClassName ().c_str ());
+ if (!input_) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] No source was "
+ "input! Returning all input correspondences.\n",
+ getClassName().c_str());
return;
}
- if (!target_)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] No target was input! Returning all input correspondences.\n",
- getClassName ().c_str ());
+ if (!target_) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] No target was "
+ "input! Returning all input correspondences.\n",
+ getClassName().c_str());
return;
}
// Check cardinality
- if (cardinality_ < 2)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] Polygon cardinality too low!. Returning all input correspondences.\n",
- getClassName ().c_str() );
+ if (cardinality_ < 2) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] Polygon "
+ "cardinality too low!. Returning all input correspondences.\n",
+ getClassName().c_str());
return;
}
// Number of input correspondences
- const int nr_correspondences = static_cast<int> (original_correspondences.size ());
+ const int nr_correspondences = static_cast<int>(original_correspondences.size());
// Not enough correspondences for polygonal rejections
- if (cardinality_ >= nr_correspondences)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] Number of correspondences smaller than polygon cardinality! Returning all input correspondences.\n",
- getClassName ().c_str() );
+ if (cardinality_ >= nr_correspondences) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] Number of "
+ "correspondences smaller than polygon cardinality! Returning all input "
+ "correspondences.\n",
+ getClassName().c_str());
return;
}
// Check similarity
- if (similarity_threshold_ < 0.0f || similarity_threshold_ > 1.0f)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] Invalid edge length similarity - must be in [0,1]!. Returning all input correspondences.\n",
- getClassName ().c_str() );
+ if (similarity_threshold_ < 0.0f || similarity_threshold_ > 1.0f) {
+ PCL_ERROR(
+ "[pcl::registration::%s::getRemainingCorrespondences] Invalid edge length "
+ "similarity - must be in [0,1]!. Returning all input correspondences.\n",
+ getClassName().c_str());
return;
}
similarity_threshold_squared_ = similarity_threshold_ * similarity_threshold_;
// Initialization of result
- remaining_correspondences.clear ();
- remaining_correspondences.reserve (nr_correspondences);
+ remaining_correspondences.clear();
+ remaining_correspondences.reserve(nr_correspondences);
// Number of times a correspondence is sampled and number of times it was accepted
- std::vector<int> num_samples (nr_correspondences, 0);
- std::vector<int> num_accepted (nr_correspondences, 0);
+ std::vector<int> num_samples(nr_correspondences, 0);
+ std::vector<int> num_accepted(nr_correspondences, 0);
// Main loop
- for (int i = 0; i < iterations_; ++i)
- {
+ for (int i = 0; i < iterations_; ++i) {
// Sample cardinality_ correspondences without replacement
- const std::vector<int> idx = getUniqueRandomIndices (nr_correspondences, cardinality_);
+ const std::vector<int> idx =
+ getUniqueRandomIndices(nr_correspondences, cardinality_);
// Verify the polygon similarity
- if (thresholdPolygon (original_correspondences, idx))
- {
+ if (thresholdPolygon(original_correspondences, idx)) {
// Increment sample counter and accept counter
- for (int j = 0; j < cardinality_; ++j)
- {
- ++num_samples[ idx[j] ];
- ++num_accepted[ idx[j] ];
+ for (int j = 0; j < cardinality_; ++j) {
+ ++num_samples[idx[j]];
+ ++num_accepted[idx[j]];
}
}
- else
- {
+ else {
// Not accepted, only increment sample counter
for (int j = 0; j < cardinality_; ++j)
- ++num_samples[ idx[j] ];
+ ++num_samples[idx[j]];
}
}
// Now calculate the acceptance rate of each correspondence
- std::vector<float> accept_rate (nr_correspondences, 0.0f);
- for (int i = 0; i < nr_correspondences; ++i)
- {
+ std::vector<float> accept_rate(nr_correspondences, 0.0f);
+ for (int i = 0; i < nr_correspondences; ++i) {
const int numsi = num_samples[i];
if (numsi == 0)
accept_rate[i] = 0.0f;
else
- accept_rate[i] = static_cast<float> (num_accepted[i]) / static_cast<float> (numsi);
+ accept_rate[i] = static_cast<float>(num_accepted[i]) / static_cast<float>(numsi);
}
// Compute a histogram in range [0,1] for acceptance rates
const int hist_size = nr_correspondences / 2; // TODO: Optimize this
- const std::vector<int> histogram = computeHistogram (accept_rate, 0.0f, 1.0f, hist_size);
+ const std::vector<int> histogram =
+ computeHistogram(accept_rate, 0.0f, 1.0f, hist_size);
// Find the cut point between outliers and inliers using Otsu's thresholding method
- const int cut_idx = findThresholdOtsu (histogram);
- const float cut = static_cast<float> (cut_idx) / static_cast<float> (hist_size);
+ const int cut_idx = findThresholdOtsu(histogram);
+ const float cut = static_cast<float>(cut_idx) / static_cast<float>(hist_size);
// Threshold
for (int i = 0; i < nr_correspondences; ++i)
if (accept_rate[i] > cut)
- remaining_correspondences.push_back (original_correspondences[i]);
+ remaining_correspondences.push_back(original_correspondences[i]);
}
-
-template <typename SourceT, typename TargetT> std::vector<int>
-CorrespondenceRejectorPoly<SourceT, TargetT>::computeHistogram (const std::vector<float>& data,
- float lower, float upper, int bins)
+template <typename SourceT, typename TargetT>
+std::vector<int>
+CorrespondenceRejectorPoly<SourceT, TargetT>::computeHistogram(
+ const std::vector<float>& data, float lower, float upper, int bins)
{
// Result
- std::vector<int> result (bins, 0);
+ std::vector<int> result(bins, 0);
// Last index into result and increment factor from data value --> index
const int last_idx = bins - 1;
- const float idx_per_val = static_cast<float> (bins) / (upper - lower);
+ const float idx_per_val = static_cast<float>(bins) / (upper - lower);
// Accumulate
- for (const float &value : data)
- ++result[ std::min (last_idx, int (value*idx_per_val)) ];
+ for (const float& value : data)
+ ++result[std::min(last_idx, int(value * idx_per_val))];
return (result);
}
-
-template <typename SourceT, typename TargetT> int
-CorrespondenceRejectorPoly<SourceT, TargetT>::findThresholdOtsu (const std::vector<int>& histogram)
+template <typename SourceT, typename TargetT>
+int
+CorrespondenceRejectorPoly<SourceT, TargetT>::findThresholdOtsu(
+ const std::vector<int>& histogram)
{
// Precision
const double eps = std::numeric_limits<double>::epsilon();
// Histogram dimension
- const int nbins = static_cast<int> (histogram.size ());
+ const int nbins = static_cast<int>(histogram.size());
// Mean and inverse of the number of data points
double mean = 0.0;
double sum_inv = 0.0;
- for (int i = 0; i < nbins; ++i)
- {
- mean += static_cast<double> (i * histogram[i]);
- sum_inv += static_cast<double> (histogram[i]);
+ for (int i = 0; i < nbins; ++i) {
+ mean += static_cast<double>(i * histogram[i]);
+ sum_inv += static_cast<double>(histogram[i]);
}
- sum_inv = 1.0/sum_inv;
+ sum_inv = 1.0 / sum_inv;
mean *= sum_inv;
  // Probability and mean of class 1 (data to the left of threshold)
  double class_prob1 = 0.0;
  double class_mean1 = 0.0;
  // Probability and mean of class 2 (data to the right of threshold)
  double class_prob2 = 1.0;
  // Maximized between class variance and associated bin value
  double between_class_variance_max = 0.0;
  int result = 0;
// Loop over all bin values
- for (int i = 0; i < nbins; ++i)
- {
+ for (int i = 0; i < nbins; ++i) {
class_mean1 *= class_prob1;
// Probability of bin i
- const double prob_i = static_cast<double> (histogram[i]) * sum_inv;
+ const double prob_i = static_cast<double>(histogram[i]) * sum_inv;
// Class probability 1: sum of probabilities from 0 to i
class_prob1 += prob_i;
class_prob2 -= prob_i;
// Avoid division by zero below
- if (std::min (class_prob1,class_prob2) < eps || std::max (class_prob1,class_prob2) > 1.0-eps)
+ if (std::min(class_prob1, class_prob2) < eps ||
+ std::max(class_prob1, class_prob2) > 1.0 - eps)
continue;
// Class mean 1: sum of probabilities from 0 to i, weighted by bin value
- class_mean1 = (class_mean1 + static_cast<double> (i) * prob_i) / class_prob1;
+ class_mean1 = (class_mean1 + static_cast<double>(i) * prob_i) / class_prob1;
// Class mean 2: sum of probabilities from i+1 to nbins-1, weighted by bin value
- const double class_mean2 = (mean - class_prob1*class_mean1) / class_prob2;
+ const double class_mean2 = (mean - class_prob1 * class_mean1) / class_prob2;
// Between class variance
- const double between_class_variance = class_prob1 * class_prob2
- * (class_mean1 - class_mean2)
- * (class_mean1 - class_mean2);
+ const double between_class_variance = class_prob1 * class_prob2 *
+ (class_mean1 - class_mean2) *
+ (class_mean1 - class_mean2);
// If between class variance is maximized, update result
- if (between_class_variance > between_class_variance_max)
- {
+ if (between_class_variance > between_class_variance_max) {
between_class_variance_max = between_class_variance;
result = i;
    }
  }
  return (result);
}
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_POLY_HPP_
-
+#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_POLY_HPP_
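The acceptance-rate thresholding above is one-dimensional Otsu's method: every correspondence accumulates the fraction of sampled cardinality_-gons containing it that passed the edge-length similarity test, and the cut between low and high rates is the histogram bin t maximising the between-class variance

    \[ \sigma_b^2(t) = \omega_1(t)\,\omega_2(t)\,\bigl(\mu_1(t) - \mu_2(t)\bigr)^2, \]

where omega_1, omega_2 are the class probabilities (class_prob1 and class_prob2 in the code) and mu_1, mu_2 the class means. Correspondences whose acceptance rate lies above the resulting cut are kept.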
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_HPP_
-#include <unordered_map>
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_registration.h>
+#include <unordered_map>
-namespace pcl
-{
+namespace pcl {
-namespace registration
-{
+namespace registration {
-template <typename PointT> void
-CorrespondenceRejectorSampleConsensus<PointT>::getRemainingCorrespondences (
+template <typename PointT>
+void
+CorrespondenceRejectorSampleConsensus<PointT>::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
- if (!input_)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] No input cloud dataset was given!\n", getClassName ().c_str ());
+ if (!input_) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] No input cloud "
+ "dataset was given!\n",
+ getClassName().c_str());
return;
}
- if (!target_)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] No input target dataset was given!\n", getClassName ().c_str ());
+ if (!target_) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] No input target "
+ "dataset was given!\n",
+ getClassName().c_str());
return;
}
if (save_inliers_)
- inlier_indices_.clear ();
+ inlier_indices_.clear();
- int nr_correspondences = static_cast<int> (original_correspondences.size ());
- std::vector<int> source_indices (nr_correspondences);
- std::vector<int> target_indices (nr_correspondences);
+ int nr_correspondences = static_cast<int>(original_correspondences.size());
+ pcl::Indices source_indices(nr_correspondences);
+ pcl::Indices target_indices(nr_correspondences);
// Copy the query-match indices
- for (std::size_t i = 0; i < original_correspondences.size (); ++i)
- {
+ for (std::size_t i = 0; i < original_correspondences.size(); ++i) {
source_indices[i] = original_correspondences[i].index_query;
target_indices[i] = original_correspondences[i].index_match;
}
- {
- // From the set of correspondences found, attempt to remove outliers
- // Create the registration model
- using SampleConsensusModelRegistrationPtr = typename pcl::SampleConsensusModelRegistration<PointT>::Ptr;
- SampleConsensusModelRegistrationPtr model;
- model.reset (new pcl::SampleConsensusModelRegistration<PointT> (input_, source_indices));
- // Pass the target_indices
- model->setInputTarget (target_, target_indices);
- // Create a RANSAC model
- pcl::RandomSampleConsensus<PointT> sac (model, inlier_threshold_);
- sac.setMaxIterations (max_iterations_);
-
- // Compute the set of inliers
- if (!sac.computeModel ())
- {
- remaining_correspondences = original_correspondences;
- best_transformation_.setIdentity ();
- return;
- }
- if (refine_ && !sac.refineModel ())
- {
- PCL_ERROR ("[pcl::registration::CorrespondenceRejectorSampleConsensus::getRemainingCorrespondences] Could not refine the model! Returning an empty solution.\n");
- return;
- }
-
- std::vector<int> inliers;
- sac.getInliers (inliers);
-
- if (inliers.size () < 3)
- {
- remaining_correspondences = original_correspondences;
- best_transformation_.setIdentity ();
- return;
- }
- std::unordered_map<int, int> index_to_correspondence;
- for (int i = 0; i < nr_correspondences; ++i)
- index_to_correspondence[original_correspondences[i].index_query] = i;
-
- remaining_correspondences.resize (inliers.size ());
- for (std::size_t i = 0; i < inliers.size (); ++i)
- remaining_correspondences[i] = original_correspondences[index_to_correspondence[inliers[i]]];
-
- if (save_inliers_)
- {
- inlier_indices_.reserve (inliers.size ());
- for (const int &inlier : inliers)
- inlier_indices_.push_back (index_to_correspondence[inlier]);
- }
-
- // get best transformation
- Eigen::VectorXf model_coefficients;
- sac.getModelCoefficients (model_coefficients);
- best_transformation_.row (0) = model_coefficients.segment<4>(0);
- best_transformation_.row (1) = model_coefficients.segment<4>(4);
- best_transformation_.row (2) = model_coefficients.segment<4>(8);
- best_transformation_.row (3) = model_coefficients.segment<4>(12);
- }
+ {
+ // From the set of correspondences found, attempt to remove outliers
+ // Create the registration model
+ using SampleConsensusModelRegistrationPtr =
+ typename pcl::SampleConsensusModelRegistration<PointT>::Ptr;
+ SampleConsensusModelRegistrationPtr model;
+ model.reset(
+ new pcl::SampleConsensusModelRegistration<PointT>(input_, source_indices));
+ // Pass the target_indices
+ model->setInputTarget(target_, target_indices);
+ // Create a RANSAC model
+ pcl::RandomSampleConsensus<PointT> sac(model, inlier_threshold_);
+ sac.setMaxIterations(max_iterations_);
+
+ // Compute the set of inliers
+ if (!sac.computeModel()) {
+ remaining_correspondences = original_correspondences;
+ best_transformation_.setIdentity();
+ return;
+ }
+ if (refine_ && !sac.refineModel()) {
+ PCL_ERROR("[pcl::registration::CorrespondenceRejectorSampleConsensus::"
+ "getRemainingCorrespondences] Could not refine the model! Returning an "
+ "empty solution.\n");
+ return;
+ }
+
+ pcl::Indices inliers;
+ sac.getInliers(inliers);
+
+ if (inliers.size() < 3) {
+ remaining_correspondences = original_correspondences;
+ best_transformation_.setIdentity();
+ return;
+ }
+ std::unordered_map<int, int> index_to_correspondence;
+ for (int i = 0; i < nr_correspondences; ++i)
+ index_to_correspondence[original_correspondences[i].index_query] = i;
+
+ remaining_correspondences.resize(inliers.size());
+ for (std::size_t i = 0; i < inliers.size(); ++i)
+ remaining_correspondences[i] =
+ original_correspondences[index_to_correspondence[inliers[i]]];
+
+ if (save_inliers_) {
+ inlier_indices_.reserve(inliers.size());
+ for (const auto& inlier : inliers)
+ inlier_indices_.push_back(index_to_correspondence[inlier]);
+ }
+
+ // get best transformation
+ Eigen::VectorXf model_coefficients;
+ sac.getModelCoefficients(model_coefficients);
+ best_transformation_.row(0) = model_coefficients.segment<4>(0);
+ best_transformation_.row(1) = model_coefficients.segment<4>(4);
+ best_transformation_.row(2) = model_coefficients.segment<4>(8);
+ best_transformation_.row(3) = model_coefficients.segment<4>(12);
+ }
}
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_HPP_
-
+#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_HPP_
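// Typical use of the RANSAC-based rejector above, as a rough sketch: method names
// follow the class's public interface, while the point clouds, correspondences, and
// threshold/iteration values are placeholders chosen for illustration only.
#include <pcl/correspondence.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/correspondence_rejection_sample_consensus.h>

void
rejectOutlierCorrespondences(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
                             const pcl::PointCloud<pcl::PointXYZ>::Ptr& target,
                             const pcl::Correspondences& all_correspondences,
                             pcl::Correspondences& inlier_correspondences)
{
  pcl::registration::CorrespondenceRejectorSampleConsensus<pcl::PointXYZ> rejector;
  rejector.setInputSource(source);
  rejector.setInputTarget(target);
  rejector.setInlierThreshold(0.05);   // maximum point-to-point distance for an inlier
  rejector.setMaximumIterations(1000); // RANSAC iterations (max_iterations_ above)
  rejector.getRemainingCorrespondences(all_correspondences, inlier_correspondences);

  // The rigid transform estimated from the model coefficients (filled row by row in
  // the implementation above) is available afterwards.
  const Eigen::Matrix4f best = rejector.getBestTransformation();
  (void)best;
}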
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_2D_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_2D_HPP_
-#include <pcl/sample_consensus/sac_model_registration_2d.h>
#include <pcl/sample_consensus/ransac.h>
+#include <pcl/sample_consensus/sac_model_registration_2d.h>
#include <unordered_map>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointT> void
-CorrespondenceRejectorSampleConsensus2D<PointT>::getRemainingCorrespondences (
+template <typename PointT>
+void
+CorrespondenceRejectorSampleConsensus2D<PointT>::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
- if (!input_)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] No input cloud dataset was given!\n", getClassName ().c_str ());
+ if (!input_) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] No input cloud "
+ "dataset was given!\n",
+ getClassName().c_str());
return;
}
- if (!target_)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] No input target dataset was given!\n", getClassName ().c_str ());
+ if (!target_) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] No input target "
+ "dataset was given!\n",
+ getClassName().c_str());
return;
}
- if (projection_matrix_ == Eigen::Matrix3f::Identity ())
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] Intrinsic camera parameters not given!\n", getClassName ().c_str ());
+ if (projection_matrix_ == Eigen::Matrix3f::Identity()) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] Intrinsic camera "
+ "parameters not given!\n",
+ getClassName().c_str());
return;
}
- int nr_correspondences = static_cast<int> (original_correspondences.size ());
- std::vector<int> source_indices (nr_correspondences);
- std::vector<int> target_indices (nr_correspondences);
+ int nr_correspondences = static_cast<int>(original_correspondences.size());
+ pcl::Indices source_indices(nr_correspondences);
+ pcl::Indices target_indices(nr_correspondences);
// Copy the query-match indices
- for (std::size_t i = 0; i < original_correspondences.size (); ++i)
- {
+ for (std::size_t i = 0; i < original_correspondences.size(); ++i) {
source_indices[i] = original_correspondences[i].index_query;
target_indices[i] = original_correspondences[i].index_match;
}
// From the set of correspondences found, attempt to remove outliers
- typename pcl::SampleConsensusModelRegistration2D<PointT>::Ptr model (new pcl::SampleConsensusModelRegistration2D<PointT> (input_, source_indices));
+ typename pcl::SampleConsensusModelRegistration2D<PointT>::Ptr model(
+ new pcl::SampleConsensusModelRegistration2D<PointT>(input_, source_indices));
// Pass the target_indices
- model->setInputTarget (target_, target_indices);
- model->setProjectionMatrix (projection_matrix_);
+ model->setInputTarget(target_, target_indices);
+ model->setProjectionMatrix(projection_matrix_);
// Create a RANSAC model
- pcl::RandomSampleConsensus<PointT> sac (model, inlier_threshold_);
- sac.setMaxIterations (max_iterations_);
+ pcl::RandomSampleConsensus<PointT> sac(model, inlier_threshold_);
+ sac.setMaxIterations(max_iterations_);
// Compute the set of inliers
- if (!sac.computeModel ())
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] Error computing model! Returning the original correspondences...\n", getClassName ().c_str ());
+ if (!sac.computeModel()) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] Error computing "
+ "model! Returning the original correspondences...\n",
+ getClassName().c_str());
remaining_correspondences = original_correspondences;
- best_transformation_.setIdentity ();
+ best_transformation_.setIdentity();
return;
}
- if (refine_ && !sac.refineModel (2.0))
- PCL_WARN ("[pcl::registration::%s::getRemainingCorrespondences] Error refining model!\n", getClassName ().c_str ());
-
- std::vector<int> inliers;
- sac.getInliers (inliers);
-
- if (inliers.size () < 3)
- {
- PCL_ERROR ("[pcl::registration::%s::getRemainingCorrespondences] Less than 3 correspondences found!\n", getClassName ().c_str ());
+ if (refine_ && !sac.refineModel(2.0))
+ PCL_WARN(
+ "[pcl::registration::%s::getRemainingCorrespondences] Error refining model!\n",
+ getClassName().c_str());
+
+ pcl::Indices inliers;
+ sac.getInliers(inliers);
+
+ if (inliers.size() < 3) {
+ PCL_ERROR("[pcl::registration::%s::getRemainingCorrespondences] Less than 3 "
+ "correspondences found!\n",
+ getClassName().c_str());
remaining_correspondences = original_correspondences;
- best_transformation_.setIdentity ();
+ best_transformation_.setIdentity();
return;
}
for (int i = 0; i < nr_correspondences; ++i)
index_to_correspondence[original_correspondences[i].index_query] = i;
- remaining_correspondences.resize (inliers.size ());
- for (std::size_t i = 0; i < inliers.size (); ++i)
- remaining_correspondences[i] = original_correspondences[index_to_correspondence[inliers[i]]];
+ remaining_correspondences.resize(inliers.size());
+ for (std::size_t i = 0; i < inliers.size(); ++i)
+ remaining_correspondences[i] =
+ original_correspondences[index_to_correspondence[inliers[i]]];
// get best transformation
Eigen::VectorXf model_coefficients;
- sac.getModelCoefficients (model_coefficients);
- best_transformation_.row (0) = model_coefficients.segment<4>(0);
- best_transformation_.row (1) = model_coefficients.segment<4>(4);
- best_transformation_.row (2) = model_coefficients.segment<4>(8);
- best_transformation_.row (3) = model_coefficients.segment<4>(12);
+ sac.getModelCoefficients(model_coefficients);
+ best_transformation_.row(0) = model_coefficients.segment<4>(0);
+ best_transformation_.row(1) = model_coefficients.segment<4>(4);
+ best_transformation_.row(2) = model_coefficients.segment<4>(8);
+ best_transformation_.row(3) = model_coefficients.segment<4>(12);
}
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_2D_HPP_
-
+#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_2D_HPP_
*/
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SURFACE_NORMAL_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SURFACE_NORMAL_HPP_
-
+PCL_DEPRECATED_HEADER(1, 15, "")
#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SURFACE_NORMAL_HPP_ */
*/
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_TRIMMED_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_TRIMMED_HPP_
-
+PCL_DEPRECATED_HEADER(1, 15, "")
#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_TRIMMED_HPP_ */
*/
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_VAR_TRIMMED_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_VAR_TRIMMED_HPP_
-
+PCL_DEPRECATED_HEADER(1, 15, "")
#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_VAR_TRIMMED_HPP_ */
#pragma once
-#include <pcl/registration/eigen.h>
-
#include <cstddef>
#include <vector>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
inline void
-getCorDistMeanStd (const pcl::Correspondences &correspondences, double &mean, double &stddev)
+getCorDistMeanStd(const pcl::Correspondences& correspondences,
+ double& mean,
+ double& stddev)
{
- if (correspondences.empty ())
+ if (correspondences.empty())
return;
double sum = 0, sq_sum = 0;
- for (const auto &correspondence : correspondences)
- {
+ for (const auto& correspondence : correspondences) {
sum += correspondence.distance;
sq_sum += correspondence.distance * correspondence.distance;
}
- mean = sum / static_cast<double> (correspondences.size ());
- double variance = (sq_sum - sum * sum / static_cast<double> (correspondences.size ())) / static_cast<double> (correspondences.size () - 1);
- stddev = sqrt (variance);
+ mean = sum / static_cast<double>(correspondences.size());
+ double variance = (sq_sum - sum * sum / static_cast<double>(correspondences.size())) /
+ static_cast<double>(correspondences.size() - 1);
+ stddev = sqrt(variance);
}
inline void
-getQueryIndices (const pcl::Correspondences& correspondences, std::vector<int>& indices)
+getQueryIndices(const pcl::Correspondences& correspondences, pcl::Indices& indices)
{
- indices.resize (correspondences.size ());
- for (std::size_t i = 0; i < correspondences.size (); ++i)
+ indices.resize(correspondences.size());
+ for (std::size_t i = 0; i < correspondences.size(); ++i)
indices[i] = correspondences[i].index_query;
}
-
inline void
-getMatchIndices (const pcl::Correspondences& correspondences, std::vector<int>& indices)
+getMatchIndices(const pcl::Correspondences& correspondences, pcl::Indices& indices)
{
- indices.resize (correspondences.size ());
- for (std::size_t i = 0; i < correspondences.size (); ++i)
+ indices.resize(correspondences.size());
+ for (std::size_t i = 0; i < correspondences.size(); ++i)
indices[i] = correspondences[i].index_match;
}
} // namespace registration
} // namespace pcl
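// For reference, getCorDistMeanStd uses the running-sum form of the sample variance,
//   var = (sum(d^2) - sum(d)^2 / n) / (n - 1),
// over the correspondence distances. The same computation on a plain vector of
// distances, as a self-contained sketch:
#include <cmath>
#include <vector>

inline void
distanceMeanStd(const std::vector<double>& distances, double& mean, double& stddev)
{
  if (distances.size() < 2) {
    mean = distances.empty() ? 0.0 : distances.front();
    stddev = 0.0;
    return;
  }
  double sum = 0.0, sq_sum = 0.0;
  for (const double d : distances) {
    sum += d;
    sq_sum += d * d;
  }
  const double n = static_cast<double>(distances.size());
  mean = sum / n;
  stddev = std::sqrt((sq_sum - sum * sum / n) / (n - 1.0));
}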
-
#include <pcl/console/print.h>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename Scalar> bool
-DefaultConvergenceCriteria<Scalar>::hasConverged ()
+template <typename Scalar>
+bool
+DefaultConvergenceCriteria<Scalar>::hasConverged()
{
- if (convergence_state_ != CONVERGENCE_CRITERIA_NOT_CONVERGED)
- {
- //If it already converged or failed before, reset.
+ if (convergence_state_ != CONVERGENCE_CRITERIA_NOT_CONVERGED) {
+ // If it already converged or failed before, reset.
iterations_similar_transforms_ = 0;
convergence_state_ = CONVERGENCE_CRITERIA_NOT_CONVERGED;
}
bool is_similar = false;
- PCL_DEBUG ("[pcl::DefaultConvergenceCriteria::hasConverged] Iteration %d out of %d.\n", iterations_, max_iterations_);
+ PCL_DEBUG("[pcl::DefaultConvergenceCriteria::hasConverged] Iteration %d out of %d.\n",
+ iterations_,
+ max_iterations_);
// 1. Number of iterations has reached the maximum user imposed number of iterations
- if (iterations_ >= max_iterations_)
- {
- if (!failure_after_max_iter_)
- {
+ if (iterations_ >= max_iterations_) {
+ if (!failure_after_max_iter_) {
convergence_state_ = CONVERGENCE_CRITERIA_ITERATIONS;
return (true);
}
convergence_state_ = CONVERGENCE_CRITERIA_FAILURE_AFTER_MAX_ITERATIONS;
}
- // 2. The epsilon (difference) between the previous transformation and the current estimated transformation
- double cos_angle = 0.5 * (transformation_.coeff (0, 0) + transformation_.coeff (1, 1) + transformation_.coeff (2, 2) - 1);
- double translation_sqr = transformation_.coeff (0, 3) * transformation_.coeff (0, 3) +
- transformation_.coeff (1, 3) * transformation_.coeff (1, 3) +
- transformation_.coeff (2, 3) * transformation_.coeff (2, 3);
- PCL_DEBUG ("[pcl::DefaultConvergenceCriteria::hasConverged] Current transformation gave %f rotation (cosine) and %f translation.\n", cos_angle, translation_sqr);
-
- if (cos_angle >= rotation_threshold_ && translation_sqr <= translation_threshold_)
- {
- if (iterations_similar_transforms_ >= max_iterations_similar_transforms_)
- {
+ // 2. The epsilon (difference) between the previous transformation and the current
+ // estimated transformation
+ double cos_angle = 0.5 * (transformation_.coeff(0, 0) + transformation_.coeff(1, 1) +
+ transformation_.coeff(2, 2) - 1);
+ double translation_sqr = transformation_.coeff(0, 3) * transformation_.coeff(0, 3) +
+ transformation_.coeff(1, 3) * transformation_.coeff(1, 3) +
+ transformation_.coeff(2, 3) * transformation_.coeff(2, 3);
+ PCL_DEBUG("[pcl::DefaultConvergenceCriteria::hasConverged] Current transformation "
+ "gave %f rotation (cosine) and %f translation.\n",
+ cos_angle,
+ translation_sqr);
+
+ if (cos_angle >= rotation_threshold_ && translation_sqr <= translation_threshold_) {
+ if (iterations_similar_transforms_ >= max_iterations_similar_transforms_) {
convergence_state_ = CONVERGENCE_CRITERIA_TRANSFORM;
return (true);
}
is_similar = true;
}
- correspondences_cur_mse_ = calculateMSE (correspondences_);
- PCL_DEBUG ("[pcl::DefaultConvergenceCriteria::hasConverged] Previous / Current MSE for correspondences distances is: %f / %f.\n", correspondences_prev_mse_, correspondences_cur_mse_);
-
- // 3. The relative sum of Euclidean squared errors is smaller than a user defined threshold
- // Absolute
- if (std::abs (correspondences_cur_mse_ - correspondences_prev_mse_) < mse_threshold_absolute_)
- {
- if (iterations_similar_transforms_ >= max_iterations_similar_transforms_)
- {
+ correspondences_cur_mse_ = calculateMSE(correspondences_);
+ PCL_DEBUG("[pcl::DefaultConvergenceCriteria::hasConverged] Previous / Current MSE "
+ "for correspondences distances is: %f / %f.\n",
+ correspondences_prev_mse_,
+ correspondences_cur_mse_);
+
+  // 3. The relative sum of Euclidean squared errors is smaller than a user defined
+  // threshold
+  // Absolute
+ if (std::abs(correspondences_cur_mse_ - correspondences_prev_mse_) <
+ mse_threshold_absolute_) {
+ if (iterations_similar_transforms_ >= max_iterations_similar_transforms_) {
convergence_state_ = CONVERGENCE_CRITERIA_ABS_MSE;
return (true);
}
}
// Relative
- if (std::abs (correspondences_cur_mse_ - correspondences_prev_mse_) / correspondences_prev_mse_ < mse_threshold_relative_)
- {
- if (iterations_similar_transforms_ >= max_iterations_similar_transforms_)
- {
+ if (std::abs(correspondences_cur_mse_ - correspondences_prev_mse_) /
+ correspondences_prev_mse_ <
+ mse_threshold_relative_) {
+ if (iterations_similar_transforms_ >= max_iterations_similar_transforms_) {
convergence_state_ = CONVERGENCE_CRITERIA_REL_MSE;
return (true);
}
is_similar = true;
}
- if (is_similar)
- {
+ if (is_similar) {
// Increment the number of transforms that the thresholds are allowed to be similar
++iterations_similar_transforms_;
}
- else
- {
+ else {
// When the transform becomes large, reset.
iterations_similar_transforms_ = 0;
}
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_DEFAULT_CONVERGENCE_CRITERIA_HPP_
-
+#endif // PCL_REGISTRATION_DEFAULT_CONVERGENCE_CRITERIA_HPP_
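// The rotation check in hasConverged relies on trace(R) = 1 + 2*cos(theta) for a
// rotation matrix R, so cos(theta) = (r00 + r11 + r22 - 1) / 2, while the translation
// check is simply the squared norm of the last column. A small helper extracting both
// quantities from an incremental 4x4 transform (a sketch for clarity, not PCL API):
#include <Eigen/Core>

inline void
incrementalMotion(const Eigen::Matrix4f& delta,
                  double& rotation_cosine,
                  double& translation_sqr)
{
  // Cosine of the rotation angle encoded by the 3x3 block; 1.0 means "no rotation",
  // so convergence requires it to stay >= rotation_threshold_.
  rotation_cosine = 0.5 * (delta(0, 0) + delta(1, 1) + delta(2, 2) - 1.0);
  // Squared translation magnitude of the 3x1 block; convergence requires it to stay
  // <= translation_threshold_.
  translation_sqr = delta(0, 3) * delta(0, 3) + delta(1, 3) * delta(1, 3) +
                    delta(2, 3) * delta(2, 3);
}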
#ifndef PCL_REGISTRATION_IMPL_ELCH_H_
#define PCL_REGISTRATION_IMPL_ELCH_H_
+#include <pcl/common/transforms.h>
+#include <pcl/registration/registration.h>
+
+#include <boost/graph/dijkstra_shortest_paths.hpp> // for dijkstra_shortest_paths
+
#include <algorithm>
#include <list>
#include <tuple>
-#include <pcl/common/transforms.h>
-#include <pcl/registration/eigen.h>
-#include <pcl/registration/boost.h>
-#include <pcl/registration/registration.h>
-
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> void
-pcl::registration::ELCH<PointT>::loopOptimizerAlgorithm (LOAGraph &g, double *weights)
+template <typename PointT>
+void
+pcl::registration::ELCH<PointT>::loopOptimizerAlgorithm(LOAGraph& g, double* weights)
{
std::list<int> crossings, branches;
- crossings.push_back (static_cast<int> (loop_start_));
- crossings.push_back (static_cast<int> (loop_end_));
+ crossings.push_back(static_cast<int>(loop_start_));
+ crossings.push_back(static_cast<int>(loop_end_));
weights[loop_start_] = 0;
weights[loop_end_] = 1;
- int *p = new int[num_vertices (g)];
- int *p_min = new int[num_vertices (g)];
- double *d = new double[num_vertices (g)];
- double *d_min = new double[num_vertices (g)];
+ int* p = new int[num_vertices(g)];
+ int* p_min = new int[num_vertices(g)];
+ double* d = new double[num_vertices(g)];
+ double* d_min = new double[num_vertices(g)];
bool do_swap = false;
std::list<int>::iterator start_min, end_min;
// process all junctions
- while (!crossings.empty ())
- {
+ while (!crossings.empty()) {
double dist = -1;
// find shortest crossing for all vertices on the loop
- for (auto crossings_it = crossings.begin (); crossings_it != crossings.end (); )
- {
- dijkstra_shortest_paths (g, *crossings_it,
- predecessor_map(boost::make_iterator_property_map(p, get(boost::vertex_index, g))).
- distance_map(boost::make_iterator_property_map(d, get(boost::vertex_index, g))));
+ for (auto crossings_it = crossings.begin(); crossings_it != crossings.end();) {
+ dijkstra_shortest_paths(g,
+ *crossings_it,
+ predecessor_map(boost::make_iterator_property_map(
+ p, get(boost::vertex_index, g)))
+ .distance_map(boost::make_iterator_property_map(
+ d, get(boost::vertex_index, g))));
auto end_it = crossings_it;
end_it++;
// find shortest crossing for one vertex
- for (; end_it != crossings.end (); end_it++)
- {
- if (*end_it != p[*end_it] && (dist < 0 || d[*end_it] < dist))
- {
+ for (; end_it != crossings.end(); end_it++) {
+ if (*end_it != p[*end_it] && (dist < 0 || d[*end_it] < dist)) {
dist = d[*end_it];
start_min = crossings_it;
end_min = end_it;
do_swap = true;
}
}
- if (do_swap)
- {
- std::swap (p, p_min);
- std::swap (d, d_min);
+ if (do_swap) {
+ std::swap(p, p_min);
+ std::swap(d, d_min);
do_swap = false;
}
// vertex starts a branch
- if (dist < 0)
- {
- branches.push_back (static_cast<int> (*crossings_it));
- crossings_it = crossings.erase (crossings_it);
+ if (dist < 0) {
+ branches.push_back(static_cast<int>(*crossings_it));
+ crossings_it = crossings.erase(crossings_it);
}
else
crossings_it++;
}
- if (dist > -1)
- {
- remove_edge (*end_min, p_min[*end_min], g);
- for (int i = p_min[*end_min]; i != *start_min; i = p_min[i])
- {
- //even right with weights[*start_min] > weights[*end_min]! (math works)
- weights[i] = weights[*start_min] + (weights[*end_min] - weights[*start_min]) * d_min[i] / d_min[*end_min];
- remove_edge (i, p_min[i], g);
- if (degree (i, g) > 0)
- {
- crossings.push_back (i);
+ if (dist > -1) {
+ remove_edge(*end_min, p_min[*end_min], g);
+ for (int i = p_min[*end_min]; i != *start_min; i = p_min[i]) {
+ // even right with weights[*start_min] > weights[*end_min]! (math works)
+ weights[i] = weights[*start_min] + (weights[*end_min] - weights[*start_min]) *
+ d_min[i] / d_min[*end_min];
+ remove_edge(i, p_min[i], g);
+ if (degree(i, g) > 0) {
+ crossings.push_back(i);
}
}
- if (degree (*start_min, g) == 0)
- crossings.erase (start_min);
+ if (degree(*start_min, g) == 0)
+ crossings.erase(start_min);
- if (degree (*end_min, g) == 0)
- crossings.erase (end_min);
+ if (degree(*end_min, g) == 0)
+ crossings.erase(end_min);
}
}
boost::graph_traits<LOAGraph>::adjacency_iterator adjacent_it, adjacent_it_end;
// error propagation
- while (!branches.empty ())
- {
- int s = branches.front ();
- branches.pop_front ();
+ while (!branches.empty()) {
+ int s = branches.front();
+ branches.pop_front();
- for (std::tie (adjacent_it, adjacent_it_end) = adjacent_vertices (s, g); adjacent_it != adjacent_it_end; ++adjacent_it)
- {
+ for (std::tie(adjacent_it, adjacent_it_end) = adjacent_vertices(s, g);
+ adjacent_it != adjacent_it_end;
+ ++adjacent_it) {
weights[*adjacent_it] = weights[s];
- if (degree (*adjacent_it, g) > 1)
- branches.push_back (static_cast<int> (*adjacent_it));
+ if (degree(*adjacent_it, g) > 1)
+ branches.push_back(static_cast<int>(*adjacent_it));
}
- clear_vertex (s, g);
+ clear_vertex(s, g);
}
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> bool
-pcl::registration::ELCH<PointT>::initCompute ()
+template <typename PointT>
+bool
+pcl::registration::ELCH<PointT>::initCompute()
{
/*if (!PCLBase<PointT>::initCompute ())
{
return (false);
}*/ //TODO
- if (loop_end_ == 0)
- {
- PCL_ERROR ("[pcl::registration::ELCH::initCompute] no end of loop defined!\n");
- deinitCompute ();
+ if (loop_end_ == 0) {
+ PCL_ERROR("[pcl::registration::ELCH::initCompute] no end of loop defined!\n");
+ deinitCompute();
return (false);
}
- //compute transformation if it's not given
- if (compute_loop_)
- {
- PointCloudPtr meta_start (new PointCloud);
- PointCloudPtr meta_end (new PointCloud);
+ // compute transformation if it's not given
+ if (compute_loop_) {
+ PointCloudPtr meta_start(new PointCloud);
+ PointCloudPtr meta_end(new PointCloud);
*meta_start = *(*loop_graph_)[loop_start_].cloud;
*meta_end = *(*loop_graph_)[loop_end_].cloud;
typename boost::graph_traits<LoopGraph>::adjacency_iterator si, si_end;
- for (std::tie (si, si_end) = adjacent_vertices (loop_start_, *loop_graph_); si != si_end; si++)
+ for (std::tie(si, si_end) = adjacent_vertices(loop_start_, *loop_graph_);
+ si != si_end;
+ si++)
*meta_start += *(*loop_graph_)[*si].cloud;
- for (std::tie (si, si_end) = adjacent_vertices (loop_end_, *loop_graph_); si != si_end; si++)
+ for (std::tie(si, si_end) = adjacent_vertices(loop_end_, *loop_graph_);
+ si != si_end;
+ si++)
*meta_end += *(*loop_graph_)[*si].cloud;
- //TODO use real pose instead of centroid
- //Eigen::Vector4f pose_start;
- //pcl::compute3DCentroid (*(*loop_graph_)[loop_start_].cloud, pose_start);
-
- //Eigen::Vector4f pose_end;
- //pcl::compute3DCentroid (*(*loop_graph_)[loop_end_].cloud, pose_end);
+ // TODO use real pose instead of centroid
+ // Eigen::Vector4f pose_start;
+ // pcl::compute3DCentroid (*(*loop_graph_)[loop_start_].cloud, pose_start);
- PointCloudPtr tmp (new PointCloud);
- //Eigen::Vector4f diff = pose_start - pose_end;
- //Eigen::Translation3f translation (diff.head (3));
- //Eigen::Affine3f trans = translation * Eigen::Quaternionf::Identity ();
- //pcl::transformPointCloud (*(*loop_graph_)[loop_end_].cloud, *tmp, trans);
+ // Eigen::Vector4f pose_end;
+ // pcl::compute3DCentroid (*(*loop_graph_)[loop_end_].cloud, pose_end);
- reg_->setInputTarget (meta_start);
+ PointCloudPtr tmp(new PointCloud);
+ // Eigen::Vector4f diff = pose_start - pose_end;
+ // Eigen::Translation3f translation (diff.head (3));
+ // Eigen::Affine3f trans = translation * Eigen::Quaternionf::Identity ();
+ // pcl::transformPointCloud (*(*loop_graph_)[loop_end_].cloud, *tmp, trans);
- reg_->setInputSource (meta_end);
+ reg_->setInputTarget(meta_start);
- reg_->align (*tmp);
+ reg_->setInputSource(meta_end);
- loop_transform_ = reg_->getFinalTransformation ();
- //TODO hack
- //loop_transform_ *= trans.matrix ();
+ reg_->align(*tmp);
+ loop_transform_ = reg_->getFinalTransformation();
+ // TODO hack
+ // loop_transform_ *= trans.matrix ();
}
return (true);
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> void
-pcl::registration::ELCH<PointT>::compute ()
+template <typename PointT>
+void
+pcl::registration::ELCH<PointT>::compute()
{
- if (!initCompute ())
- {
+ if (!initCompute()) {
return;
}
LOAGraph grb[4];
typename boost::graph_traits<LoopGraph>::edge_iterator edge_it, edge_it_end;
- for (std::tie (edge_it, edge_it_end) = edges (*loop_graph_); edge_it != edge_it_end; edge_it++)
- {
- for (auto &j : grb)
- add_edge (source (*edge_it, *loop_graph_), target (*edge_it, *loop_graph_), 1, j); //TODO add variance
+ for (std::tie(edge_it, edge_it_end) = edges(*loop_graph_); edge_it != edge_it_end;
+ edge_it++) {
+ for (auto& j : grb)
+ add_edge(source(*edge_it, *loop_graph_),
+ target(*edge_it, *loop_graph_),
+ 1,
+ j); // TODO add variance
}
- double *weights[4];
- for (int i = 0; i < 4; i++)
- {
- weights[i] = new double[num_vertices (*loop_graph_)];
- loopOptimizerAlgorithm (grb[i], weights[i]);
+ double* weights[4];
+ for (int i = 0; i < 4; i++) {
+ weights[i] = new double[num_vertices(*loop_graph_)];
+ loopOptimizerAlgorithm(grb[i], weights[i]);
}
- //TODO use pose
- //Eigen::Vector4f cend;
- //pcl::compute3DCentroid (*((*loop_graph_)[loop_end_].cloud), cend);
- //Eigen::Translation3f tend (cend.head (3));
- //Eigen::Affine3f aend (tend);
- //Eigen::Affine3f aendI = aend.inverse ();
-
- //TODO iterate ovr loop_graph_
- //typename boost::graph_traits<LoopGraph>::vertex_iterator vertex_it, vertex_it_end;
- //for (std::tie (vertex_it, vertex_it_end) = vertices (*loop_graph_); vertex_it != vertex_it_end; vertex_it++)
- for (std::size_t i = 0; i < num_vertices (*loop_graph_); i++)
- {
+ // TODO use pose
+ // Eigen::Vector4f cend;
+ // pcl::compute3DCentroid (*((*loop_graph_)[loop_end_].cloud), cend);
+ // Eigen::Translation3f tend (cend.head (3));
+ // Eigen::Affine3f aend (tend);
+ // Eigen::Affine3f aendI = aend.inverse ();
+
+  // TODO iterate over loop_graph_
+ // typename boost::graph_traits<LoopGraph>::vertex_iterator vertex_it, vertex_it_end;
+ // for (std::tie (vertex_it, vertex_it_end) = vertices (*loop_graph_); vertex_it !=
+ // vertex_it_end; vertex_it++)
+ for (std::size_t i = 0; i < num_vertices(*loop_graph_); i++) {
Eigen::Vector3f t2;
- t2[0] = loop_transform_ (0, 3) * static_cast<float> (weights[0][i]);
- t2[1] = loop_transform_ (1, 3) * static_cast<float> (weights[1][i]);
- t2[2] = loop_transform_ (2, 3) * static_cast<float> (weights[2][i]);
+ t2[0] = loop_transform_(0, 3) * static_cast<float>(weights[0][i]);
+ t2[1] = loop_transform_(1, 3) * static_cast<float>(weights[1][i]);
+ t2[2] = loop_transform_(2, 3) * static_cast<float>(weights[2][i]);
- Eigen::Affine3f bl (loop_transform_);
- Eigen::Quaternionf q (bl.rotation ());
+ Eigen::Affine3f bl(loop_transform_);
+ Eigen::Quaternionf q(bl.rotation());
Eigen::Quaternionf q2;
- q2 = Eigen::Quaternionf::Identity ().slerp (static_cast<float> (weights[3][i]), q);
+ q2 = Eigen::Quaternionf::Identity().slerp(static_cast<float>(weights[3][i]), q);
- //TODO use rotation from branch start
- Eigen::Translation3f t3 (t2);
- Eigen::Affine3f a (t3 * q2);
- //a = aend * a * aendI;
+ // TODO use rotation from branch start
+ Eigen::Translation3f t3(t2);
+ Eigen::Affine3f a(t3 * q2);
+ // a = aend * a * aendI;
- pcl::transformPointCloud (*(*loop_graph_)[i].cloud, *(*loop_graph_)[i].cloud, a);
+ pcl::transformPointCloud(*(*loop_graph_)[i].cloud, *(*loop_graph_)[i].cloud, a);
(*loop_graph_)[i].transform = a;
}
- add_edge (loop_start_, loop_end_, *loop_graph_);
+ add_edge(loop_start_, loop_end_, *loop_graph_);
- deinitCompute ();
+ deinitCompute();
}
#endif // PCL_REGISTRATION_IMPL_ELCH_H_
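// A rough outline of how the ELCH class above is driven: clouds are added in scan
// order, an ICP instance supplies the loop transform, and compute() distributes the
// loop-closure error via loopOptimizerAlgorithm(). Method names follow
// pcl/registration/elch.h; all parameter values are placeholders.
#include <pcl/point_types.h>
#include <pcl/registration/elch.h>
#include <pcl/registration/icp.h>

#include <vector>

void
closeLoop(const std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr>& scans)
{
  if (scans.size() < 2)
    return;

  pcl::registration::ELCH<pcl::PointXYZ> elch;

  // Registration object used by initCompute() to estimate loop_transform_.
  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ>::Ptr icp(
      new pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ>);
  icp->setMaximumIterations(100);
  icp->setMaxCorrespondenceDistance(0.1);
  elch.setReg(icp);

  for (const auto& scan : scans)
    elch.addPointCloud(scan);

  elch.setLoopStart(0);              // graph vertex where the loop starts
  elch.setLoopEnd(scans.size() - 1); // graph vertex where the loop closes
  elch.compute();                    // corrects every cloud in the loop graph in place
}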
#ifndef PCL_REGISTRATION_IMPL_GICP_HPP_
#define PCL_REGISTRATION_IMPL_GICP_HPP_
-#include <pcl/registration/boost.h>
#include <pcl/registration/exceptions.h>
-
-namespace pcl
-{
+namespace pcl {
template <typename PointSource, typename PointTarget>
-template<typename PointT> void
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeCovariances(typename pcl::PointCloud<PointT>::ConstPtr cloud,
- const typename pcl::search::KdTree<PointT>::Ptr kdtree,
- MatricesVector& cloud_covariances)
+template <typename PointT>
+void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeCovariances(
+ typename pcl::PointCloud<PointT>::ConstPtr cloud,
+ const typename pcl::search::KdTree<PointT>::Ptr kdtree,
+ MatricesVector& cloud_covariances)
{
- if (k_correspondences_ > int (cloud->size ()))
- {
- PCL_ERROR ("[pcl::GeneralizedIterativeClosestPoint::computeCovariances] Number or points in cloud (%lu) is less than k_correspondences_ (%lu)!\n", cloud->size (), k_correspondences_);
+ if (k_correspondences_ > int(cloud->size())) {
+    PCL_ERROR("[pcl::GeneralizedIterativeClosestPoint::computeCovariances] Number of "
+ "points in cloud (%lu) is less than k_correspondences_ (%lu)!\n",
+ cloud->size(),
+ k_correspondences_);
return;
}
Eigen::Vector3d mean;
- std::vector<int> nn_indecies; nn_indecies.reserve (k_correspondences_);
- std::vector<float> nn_dist_sq; nn_dist_sq.reserve (k_correspondences_);
+ pcl::Indices nn_indecies;
+ nn_indecies.reserve(k_correspondences_);
+ std::vector<float> nn_dist_sq;
+ nn_dist_sq.reserve(k_correspondences_);
// We should never get there but who knows
- if(cloud_covariances.size () < cloud->size ())
- cloud_covariances.resize (cloud->size ());
-
- MatricesVector::iterator matrices_iterator = cloud_covariances.begin ();
- for(auto points_iterator = cloud->begin ();
- points_iterator != cloud->end ();
- ++points_iterator, ++matrices_iterator)
- {
- const PointT &query_point = *points_iterator;
- Eigen::Matrix3d &cov = *matrices_iterator;
+ if (cloud_covariances.size() < cloud->size())
+ cloud_covariances.resize(cloud->size());
+
+ MatricesVector::iterator matrices_iterator = cloud_covariances.begin();
+ for (auto points_iterator = cloud->begin(); points_iterator != cloud->end();
+ ++points_iterator, ++matrices_iterator) {
+ const PointT& query_point = *points_iterator;
+ Eigen::Matrix3d& cov = *matrices_iterator;
// Zero out the cov and mean
- cov.setZero ();
- mean.setZero ();
+ cov.setZero();
+ mean.setZero();
// Search for the K nearest neighbours
kdtree->nearestKSearch(query_point, k_correspondences_, nn_indecies, nn_dist_sq);
// Find the covariance matrix
- for(int j = 0; j < k_correspondences_; j++) {
- const PointT &pt = (*cloud)[nn_indecies[j]];
+ for (int j = 0; j < k_correspondences_; j++) {
+ const PointT& pt = (*cloud)[nn_indecies[j]];
mean[0] += pt.x;
mean[1] += pt.y;
mean[2] += pt.z;
- cov(0,0) += pt.x*pt.x;
+ cov(0, 0) += pt.x * pt.x;
- cov(1,0) += pt.y*pt.x;
- cov(1,1) += pt.y*pt.y;
+ cov(1, 0) += pt.y * pt.x;
+ cov(1, 1) += pt.y * pt.y;
- cov(2,0) += pt.z*pt.x;
- cov(2,1) += pt.z*pt.y;
- cov(2,2) += pt.z*pt.z;
+ cov(2, 0) += pt.z * pt.x;
+ cov(2, 1) += pt.z * pt.y;
+ cov(2, 2) += pt.z * pt.z;
}
- mean /= static_cast<double> (k_correspondences_);
+ mean /= static_cast<double>(k_correspondences_);
// Get the actual covariance
for (int k = 0; k < 3; k++)
- for (int l = 0; l <= k; l++)
- {
- cov(k,l) /= static_cast<double> (k_correspondences_);
- cov(k,l) -= mean[k]*mean[l];
- cov(l,k) = cov(k,l);
+ for (int l = 0; l <= k; l++) {
+ cov(k, l) /= static_cast<double>(k_correspondences_);
+ cov(k, l) -= mean[k] * mean[l];
+ cov(l, k) = cov(k, l);
}
// Compute the SVD (covariance matrix is symmetric so U = V')
Eigen::JacobiSVD<Eigen::Matrix3d> svd(cov, Eigen::ComputeFullU);
- cov.setZero ();
- Eigen::Matrix3d U = svd.matrixU ();
- // Reconstitute the covariance matrix with modified singular values using the column // vectors in V.
- for(int k = 0; k < 3; k++) {
+ cov.setZero();
+ Eigen::Matrix3d U = svd.matrixU();
+    // Reconstitute the covariance matrix with modified singular values using the
+    // column vectors in V.
+ for (int k = 0; k < 3; k++) {
Eigen::Vector3d col = U.col(k);
double v = 1.; // biggest 2 singular values replaced by 1
- if(k == 2) // smallest singular value replaced by gicp_epsilon
+ if (k == 2) // smallest singular value replaced by gicp_epsilon
v = gicp_epsilon_;
- cov+= v * col * col.transpose();
+ cov += v * col * col.transpose();
}
}
}
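// Note: the singular-value loop above is equivalent to rebuilding each covariance as
//   cov = U * diag(1, 1, gicp_epsilon_) * U^T,
// i.e. the two dominant local directions keep unit scale while the direction of least
// variance (the local surface normal) is flattened to gicp_epsilon_. This is what makes
// the Mahalanobis weight M = (R * C1 * R^T + C2)^-1 used below behave plane-to-plane.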
-
-template <typename PointSource, typename PointTarget> void
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeRDerivative(const Vector6d &x, const Eigen::Matrix3d &R, Vector6d& g) const
+template <typename PointSource, typename PointTarget>
+void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeRDerivative(
+ const Vector6d& x, const Eigen::Matrix3d& R, Vector6d& g) const
{
Eigen::Matrix3d dR_dPhi;
Eigen::Matrix3d dR_dTheta;
double ctheta = std::cos(theta), stheta = sin(theta);
double cpsi = std::cos(psi), spsi = sin(psi);
- dR_dPhi(0,0) = 0.;
- dR_dPhi(1,0) = 0.;
- dR_dPhi(2,0) = 0.;
+ dR_dPhi(0, 0) = 0.;
+ dR_dPhi(1, 0) = 0.;
+ dR_dPhi(2, 0) = 0.;
- dR_dPhi(0,1) = sphi*spsi + cphi*cpsi*stheta;
- dR_dPhi(1,1) = -cpsi*sphi + cphi*spsi*stheta;
- dR_dPhi(2,1) = cphi*ctheta;
+ dR_dPhi(0, 1) = sphi * spsi + cphi * cpsi * stheta;
+ dR_dPhi(1, 1) = -cpsi * sphi + cphi * spsi * stheta;
+ dR_dPhi(2, 1) = cphi * ctheta;
- dR_dPhi(0,2) = cphi*spsi - cpsi*sphi*stheta;
- dR_dPhi(1,2) = -cphi*cpsi - sphi*spsi*stheta;
- dR_dPhi(2,2) = -ctheta*sphi;
+ dR_dPhi(0, 2) = cphi * spsi - cpsi * sphi * stheta;
+ dR_dPhi(1, 2) = -cphi * cpsi - sphi * spsi * stheta;
+ dR_dPhi(2, 2) = -ctheta * sphi;
- dR_dTheta(0,0) = -cpsi*stheta;
- dR_dTheta(1,0) = -spsi*stheta;
- dR_dTheta(2,0) = -ctheta;
+ dR_dTheta(0, 0) = -cpsi * stheta;
+ dR_dTheta(1, 0) = -spsi * stheta;
+ dR_dTheta(2, 0) = -ctheta;
- dR_dTheta(0,1) = cpsi*ctheta*sphi;
- dR_dTheta(1,1) = ctheta*sphi*spsi;
- dR_dTheta(2,1) = -sphi*stheta;
+ dR_dTheta(0, 1) = cpsi * ctheta * sphi;
+ dR_dTheta(1, 1) = ctheta * sphi * spsi;
+ dR_dTheta(2, 1) = -sphi * stheta;
- dR_dTheta(0,2) = cphi*cpsi*ctheta;
- dR_dTheta(1,2) = cphi*ctheta*spsi;
- dR_dTheta(2,2) = -cphi*stheta;
+ dR_dTheta(0, 2) = cphi * cpsi * ctheta;
+ dR_dTheta(1, 2) = cphi * ctheta * spsi;
+ dR_dTheta(2, 2) = -cphi * stheta;
- dR_dPsi(0,0) = -ctheta*spsi;
- dR_dPsi(1,0) = cpsi*ctheta;
- dR_dPsi(2,0) = 0.;
+ dR_dPsi(0, 0) = -ctheta * spsi;
+ dR_dPsi(1, 0) = cpsi * ctheta;
+ dR_dPsi(2, 0) = 0.;
- dR_dPsi(0,1) = -cphi*cpsi - sphi*spsi*stheta;
- dR_dPsi(1,1) = -cphi*spsi + cpsi*sphi*stheta;
- dR_dPsi(2,1) = 0.;
+ dR_dPsi(0, 1) = -cphi * cpsi - sphi * spsi * stheta;
+ dR_dPsi(1, 1) = -cphi * spsi + cpsi * sphi * stheta;
+ dR_dPsi(2, 1) = 0.;
- dR_dPsi(0,2) = cpsi*sphi - cphi*spsi*stheta;
- dR_dPsi(1,2) = sphi*spsi + cphi*cpsi*stheta;
- dR_dPsi(2,2) = 0.;
+ dR_dPsi(0, 2) = cpsi * sphi - cphi * spsi * stheta;
+ dR_dPsi(1, 2) = sphi * spsi + cphi * cpsi * stheta;
+ dR_dPsi(2, 2) = 0.;
g[3] = matricesInnerProd(dR_dPhi, R);
g[4] = matricesInnerProd(dR_dTheta, R);
g[5] = matricesInnerProd(dR_dPsi, R);
}
-
-template <typename PointSource, typename PointTarget> void
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::estimateRigidTransformationBFGS (const PointCloudSource &cloud_src,
- const std::vector<int> &indices_src,
- const PointCloudTarget &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Eigen::Matrix4f &transformation_matrix)
+template <typename PointSource, typename PointTarget>
+void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::
+ estimateRigidTransformationBFGS(const PointCloudSource& cloud_src,
+ const pcl::Indices& indices_src,
+ const PointCloudTarget& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Eigen::Matrix4f& transformation_matrix)
{
- if (indices_src.size () < 4) // need at least 4 samples
+ if (indices_src.size() < 4) // need at least 4 samples
{
- PCL_THROW_EXCEPTION (NotEnoughPointsException,
- "[pcl::GeneralizedIterativeClosestPoint::estimateRigidTransformationBFGS] Need at least 4 points to estimate a transform! Source and target have " << indices_src.size () << " points!");
+ PCL_THROW_EXCEPTION(
+ NotEnoughPointsException,
+ "[pcl::GeneralizedIterativeClosestPoint::estimateRigidTransformationBFGS] Need "
+ "at least 4 points to estimate a transform! Source and target have "
+ << indices_src.size() << " points!");
return;
}
// Set the initial solution
- Vector6d x = Vector6d::Zero ();
+ Vector6d x = Vector6d::Zero();
// translation part
- x[0] = transformation_matrix (0,3);
- x[1] = transformation_matrix (1,3);
- x[2] = transformation_matrix (2,3);
+ x[0] = transformation_matrix(0, 3);
+ x[1] = transformation_matrix(1, 3);
+ x[2] = transformation_matrix(2, 3);
// rotation part (Z Y X euler angles convention)
// see: https://en.wikipedia.org/wiki/Rotation_matrix#General_rotations
- x[3] = std::atan2 (transformation_matrix (2,1), transformation_matrix (2,2));
- x[4] = asin (-transformation_matrix (2,0));
- x[5] = std::atan2 (transformation_matrix (1,0), transformation_matrix (0,0));
+ x[3] = std::atan2(transformation_matrix(2, 1), transformation_matrix(2, 2));
+ x[4] = asin(-transformation_matrix(2, 0));
+ x[5] = std::atan2(transformation_matrix(1, 0), transformation_matrix(0, 0));
// Set temporary pointers
tmp_src_ = &cloud_src;
// Optimize using forward-difference approximation LM
OptimizationFunctorWithIndices functor(this);
- BFGS<OptimizationFunctorWithIndices> bfgs (functor);
+ BFGS<OptimizationFunctorWithIndices> bfgs(functor);
bfgs.parameters.sigma = 0.01;
bfgs.parameters.rho = 0.01;
bfgs.parameters.tau1 = 9;
bfgs.parameters.order = 3;
int inner_iterations_ = 0;
- int result = bfgs.minimizeInit (x);
+ int result = bfgs.minimizeInit(x);
result = BFGSSpace::Running;
- do
- {
+ do {
inner_iterations_++;
- result = bfgs.minimizeOneStep (x);
- if(result)
- {
+ result = bfgs.minimizeOneStep(x);
+ if (result) {
break;
}
result = bfgs.testGradient();
- } while(result == BFGSSpace::Running && inner_iterations_ < max_inner_iterations_);
- if(result == BFGSSpace::NoProgress || result == BFGSSpace::Success || inner_iterations_ == max_inner_iterations_)
- {
- PCL_DEBUG ("[pcl::registration::TransformationEstimationBFGS::estimateRigidTransformation]");
- PCL_DEBUG ("BFGS solver finished with exit code %i \n", result);
+ } while (result == BFGSSpace::Running && inner_iterations_ < max_inner_iterations_);
+ if (result == BFGSSpace::NoProgress || result == BFGSSpace::Success ||
+ inner_iterations_ == max_inner_iterations_) {
+ PCL_DEBUG("[pcl::registration::TransformationEstimationBFGS::"
+ "estimateRigidTransformation]");
+ PCL_DEBUG("BFGS solver finished with exit code %i \n", result);
transformation_matrix.setIdentity();
applyState(transformation_matrix, x);
}
else
- PCL_THROW_EXCEPTION(SolverDidntConvergeException,
- "[pcl::" << getClassName () << "::TransformationEstimationBFGS::estimateRigidTransformation] BFGS solver didn't converge!");
+ PCL_THROW_EXCEPTION(
+ SolverDidntConvergeException,
+ "[pcl::" << getClassName()
+ << "::TransformationEstimationBFGS::estimateRigidTransformation] BFGS "
+ "solver didn't converge!");
}
-
-template <typename PointSource, typename PointTarget> inline double
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::OptimizationFunctorWithIndices::operator() (const Vector6d& x)
+template <typename PointSource, typename PointTarget>
+inline double
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::
+ OptimizationFunctorWithIndices::operator()(const Vector6d& x)
{
Eigen::Matrix4f transformation_matrix = gicp_->base_transformation_;
gicp_->applyState(transformation_matrix, x);
double f = 0;
- int m = static_cast<int> (gicp_->tmp_idx_src_->size ());
- for (int i = 0; i < m; ++i)
- {
+ int m = static_cast<int>(gicp_->tmp_idx_src_->size());
+ for (int i = 0; i < m; ++i) {
// The last coordinate, p_src[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_src = (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
+ Vector4fMapConst p_src =
+ (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap();
// The last coordinate, p_tgt[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_tgt = (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
- Eigen::Vector4f pp (transformation_matrix * p_src);
+ Vector4fMapConst p_tgt =
+ (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap();
+ Eigen::Vector4f pp(transformation_matrix * p_src);
// Estimate the distance (cost function)
// The last coordinate is still guaranteed to be set to 1.0
Eigen::Vector3d res(pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
- Eigen::Vector3d temp (gicp_->mahalanobis((*gicp_->tmp_idx_src_)[i]) * res);
- //increment= res'*temp/num_matches = temp'*M*temp/num_matches (we postpone 1/num_matches after the loop closes)
- f+= double(res.transpose() * temp);
+ Eigen::Vector3d temp(gicp_->mahalanobis((*gicp_->tmp_idx_src_)[i]) * res);
+ // increment= res'*temp/num_matches = temp'*M*temp/num_matches (we postpone
+ // 1/num_matches after the loop closes)
+ f += double(res.transpose() * temp);
}
- return f/m;
+ return f / m;
}
-
-template <typename PointSource, typename PointTarget> inline void
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::OptimizationFunctorWithIndices::df (const Vector6d& x, Vector6d& g)
+template <typename PointSource, typename PointTarget>
+inline void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::
+ OptimizationFunctorWithIndices::df(const Vector6d& x, Vector6d& g)
{
Eigen::Matrix4f transformation_matrix = gicp_->base_transformation_;
gicp_->applyState(transformation_matrix, x);
- //Zero out g
- g.setZero ();
- //Eigen::Vector3d g_t = g.head<3> ();
- Eigen::Matrix3d R = Eigen::Matrix3d::Zero ();
- int m = static_cast<int> (gicp_->tmp_idx_src_->size ());
- for (int i = 0; i < m; ++i)
- {
+ // Zero out g
+ g.setZero();
+ // Eigen::Vector3d g_t = g.head<3> ();
+ Eigen::Matrix3d R = Eigen::Matrix3d::Zero();
+ int m = static_cast<int>(gicp_->tmp_idx_src_->size());
+ for (int i = 0; i < m; ++i) {
// The last coordinate, p_src[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_src = (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
+ Vector4fMapConst p_src =
+ (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap();
// The last coordinate, p_tgt[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_tgt = (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
+ Vector4fMapConst p_tgt =
+ (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap();
- Eigen::Vector4f pp (transformation_matrix * p_src);
+ Eigen::Vector4f pp(transformation_matrix * p_src);
// The last coordinate is still guaranteed to be set to 1.0
- Eigen::Vector3d res (pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
+ Eigen::Vector3d res(pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
// temp = M*res
- Eigen::Vector3d temp (gicp_->mahalanobis ((*gicp_->tmp_idx_src_)[i]) * res);
+ Eigen::Vector3d temp(gicp_->mahalanobis((*gicp_->tmp_idx_src_)[i]) * res);
// Increment translation gradient
- // g.head<3> ()+= 2*M*res/num_matches (we postpone 2/num_matches after the loop closes)
- g.head<3> ()+= temp;
+ // g.head<3> ()+= 2*M*res/num_matches (we postpone 2/num_matches after the loop
+ // closes)
+ g.head<3>() += temp;
// Increment rotation gradient
pp = gicp_->base_transformation_ * p_src;
- Eigen::Vector3d p_src3 (pp[0], pp[1], pp[2]);
- R+= p_src3 * temp.transpose();
+ Eigen::Vector3d p_src3(pp[0], pp[1], pp[2]);
+ R += p_src3 * temp.transpose();
}
- g.head<3> ()*= 2.0/m;
- R*= 2.0/m;
+ g.head<3>() *= 2.0 / m;
+ R *= 2.0 / m;
gicp_->computeRDerivative(x, R, g);
}
-
-template <typename PointSource, typename PointTarget> inline void
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::OptimizationFunctorWithIndices::fdf (const Vector6d& x, double& f, Vector6d& g)
+template <typename PointSource, typename PointTarget>
+inline void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::
+ OptimizationFunctorWithIndices::fdf(const Vector6d& x, double& f, Vector6d& g)
{
Eigen::Matrix4f transformation_matrix = gicp_->base_transformation_;
gicp_->applyState(transformation_matrix, x);
f = 0;
- g.setZero ();
- Eigen::Matrix3d R = Eigen::Matrix3d::Zero ();
- const int m = static_cast<int> (gicp_->tmp_idx_src_->size ());
- for (int i = 0; i < m; ++i)
- {
+ g.setZero();
+ Eigen::Matrix3d R = Eigen::Matrix3d::Zero();
+ const int m = static_cast<int>(gicp_->tmp_idx_src_->size());
+ for (int i = 0; i < m; ++i) {
// The last coordinate, p_src[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_src = (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
+ Vector4fMapConst p_src =
+ (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap();
// The last coordinate, p_tgt[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_tgt = (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
- Eigen::Vector4f pp (transformation_matrix * p_src);
+ Vector4fMapConst p_tgt =
+ (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap();
+ Eigen::Vector4f pp(transformation_matrix * p_src);
// The last coordinate is still guaranteed to be set to 1.0
- Eigen::Vector3d res (pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
+ Eigen::Vector3d res(pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
// temp = M*res
- Eigen::Vector3d temp (gicp_->mahalanobis((*gicp_->tmp_idx_src_)[i]) * res);
+ Eigen::Vector3d temp(gicp_->mahalanobis((*gicp_->tmp_idx_src_)[i]) * res);
// Increment total error
- f+= double(res.transpose() * temp);
+ f += double(res.transpose() * temp);
// Increment translation gradient
- // g.head<3> ()+= 2*M*res/num_matches (we postpone 2/num_matches after the loop closes)
- g.head<3> ()+= temp;
+ // g.head<3> ()+= 2*M*res/num_matches (we postpone 2/num_matches after the loop
+ // closes)
+ g.head<3>() += temp;
pp = gicp_->base_transformation_ * p_src;
- Eigen::Vector3d p_src3 (pp[0], pp[1], pp[2]);
+ Eigen::Vector3d p_src3(pp[0], pp[1], pp[2]);
// Increment rotation gradient
- R+= p_src3 * temp.transpose();
+ R += p_src3 * temp.transpose();
}
- f/= double(m);
- g.head<3> ()*= double(2.0/m);
- R*= 2.0/m;
+ f /= double(m);
+ g.head<3>() *= double(2.0 / m);
+ R *= 2.0 / m;
gicp_->computeRDerivative(x, R, g);
}
-template <typename PointSource, typename PointTarget> inline BFGSSpace::Status
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::OptimizationFunctorWithIndices::checkGradient(const Vector6d& g)
+template <typename PointSource, typename PointTarget>
+inline BFGSSpace::Status
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::
+ OptimizationFunctorWithIndices::checkGradient(const Vector6d& g)
{
auto translation_epsilon = gicp_->translation_gradient_tolerance_;
auto rotation_epsilon = gicp_->rotation_gradient_tolerance_;
auto rotation_grad = g.tail<3>().norm();
if ((translation_grad < translation_epsilon) && (rotation_grad < rotation_epsilon))
- return BFGSSpace::Success;
+ return BFGSSpace::Success;
return BFGSSpace::Running;
}
-template <typename PointSource, typename PointTarget> inline void
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
+template <typename PointSource, typename PointTarget>
+inline void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeTransformation(
+ PointCloudSource& output, const Eigen::Matrix4f& guess)
{
- pcl::IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal ();
+ pcl::IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal();
// Difference between consecutive transforms
double delta = 0;
// Get the size of the target
- const std::size_t N = indices_->size ();
+ const std::size_t N = indices_->size();
// Set the mahalanobis matrices to identity
- mahalanobis_.resize (N, Eigen::Matrix3d::Identity ());
+ mahalanobis_.resize(N, Eigen::Matrix3d::Identity());
// Compute target cloud covariance matrices
- if ((!target_covariances_) || (target_covariances_->empty ()))
- {
- target_covariances_.reset (new MatricesVector);
- computeCovariances<PointTarget> (target_, tree_, *target_covariances_);
+ if ((!target_covariances_) || (target_covariances_->empty())) {
+ target_covariances_.reset(new MatricesVector);
+ computeCovariances<PointTarget>(target_, tree_, *target_covariances_);
}
// Compute input cloud covariance matrices
- if ((!input_covariances_) || (input_covariances_->empty ()))
- {
- input_covariances_.reset (new MatricesVector);
- computeCovariances<PointSource> (input_, tree_reciprocal_, *input_covariances_);
+ if ((!input_covariances_) || (input_covariances_->empty())) {
+ input_covariances_.reset(new MatricesVector);
+ computeCovariances<PointSource>(input_, tree_reciprocal_, *input_covariances_);
}
base_transformation_ = Eigen::Matrix4f::Identity();
nr_iterations_ = 0;
converged_ = false;
double dist_threshold = corr_dist_threshold_ * corr_dist_threshold_;
- std::vector<int> nn_indices (1);
- std::vector<float> nn_dists (1);
+ pcl::Indices nn_indices(1);
+ std::vector<float> nn_dists(1);
pcl::transformPointCloud(output, output, guess);
- while(!converged_)
- {
+ while (!converged_) {
std::size_t cnt = 0;
- std::vector<int> source_indices (indices_->size ());
- std::vector<int> target_indices (indices_->size ());
+ pcl::Indices source_indices(indices_->size());
+ pcl::Indices target_indices(indices_->size());
// guess corresponds to base_t and transformation_ to t
- Eigen::Matrix4d transform_R = Eigen::Matrix4d::Zero ();
- for(std::size_t i = 0; i < 4; i++)
- for(std::size_t j = 0; j < 4; j++)
- for(std::size_t k = 0; k < 4; k++)
- transform_R(i,j)+= double(transformation_(i,k)) * double(guess(k,j));
+ Eigen::Matrix4d transform_R = Eigen::Matrix4d::Zero();
+ for (std::size_t i = 0; i < 4; i++)
+ for (std::size_t j = 0; j < 4; j++)
+ for (std::size_t k = 0; k < 4; k++)
+ transform_R(i, j) += double(transformation_(i, k)) * double(guess(k, j));
- Eigen::Matrix3d R = transform_R.topLeftCorner<3,3> ();
+ Eigen::Matrix3d R = transform_R.topLeftCorner<3, 3>();
- for (std::size_t i = 0; i < N; i++)
- {
+ for (std::size_t i = 0; i < N; i++) {
PointSource query = output[i];
- query.getVector4fMap () = transformation_ * query.getVector4fMap ();
+ query.getVector4fMap() = transformation_ * query.getVector4fMap();
- if (!searchForNeighbors (query, nn_indices, nn_dists))
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] Unable to find a nearest neighbor in the target dataset for point %d in the source!\n", getClassName ().c_str (), (*indices_)[i]);
+ if (!searchForNeighbors(query, nn_indices, nn_dists)) {
+ PCL_ERROR("[pcl::%s::computeTransformation] Unable to find a nearest neighbor "
+ "in the target dataset for point %d in the source!\n",
+ getClassName().c_str(),
+ (*indices_)[i]);
return;
}
- // Check if the distance to the nearest neighbor is smaller than the user imposed threshold
- if (nn_dists[0] < dist_threshold)
- {
- Eigen::Matrix3d &C1 = (*input_covariances_)[i];
- Eigen::Matrix3d &C2 = (*target_covariances_)[nn_indices[0]];
- Eigen::Matrix3d &M = mahalanobis_[i];
+ // Check if the distance to the nearest neighbor is smaller than the user imposed
+ // threshold
+ if (nn_dists[0] < dist_threshold) {
+ Eigen::Matrix3d& C1 = (*input_covariances_)[i];
+ Eigen::Matrix3d& C2 = (*target_covariances_)[nn_indices[0]];
+ Eigen::Matrix3d& M = mahalanobis_[i];
// M = R*C1
M = R * C1;
// temp = M*R' + C2 = R*C1*R' + C2
Eigen::Matrix3d temp = M * R.transpose();
- temp+= C2;
+ temp += C2;
// M = temp^-1
- M = temp.inverse ();
- source_indices[cnt] = static_cast<int> (i);
+ M = temp.inverse();
+ source_indices[cnt] = static_cast<int>(i);
target_indices[cnt] = nn_indices[0];
cnt++;
}
}
// Resize to the actual number of valid correspondences
- source_indices.resize(cnt); target_indices.resize(cnt);
+ source_indices.resize(cnt);
+ target_indices.resize(cnt);
/* optimize transformation using the current assignment and Mahalanobis metrics*/
previous_transformation_ = transformation_;
- //optimization right here
- try
- {
- rigid_transformation_estimation_(output, source_indices, *target_, target_indices, transformation_);
+ // optimization right here
+ try {
+ rigid_transformation_estimation_(
+ output, source_indices, *target_, target_indices, transformation_);
/* compute the delta from this iteration */
delta = 0.;
- for(int k = 0; k < 4; k++) {
- for(int l = 0; l < 4; l++) {
+ for (int k = 0; k < 4; k++) {
+ for (int l = 0; l < 4; l++) {
double ratio = 1;
- if(k < 3 && l < 3) // rotation part of the transform
- ratio = 1./rotation_epsilon_;
+ if (k < 3 && l < 3) // rotation part of the transform
+ ratio = 1. / rotation_epsilon_;
else
- ratio = 1./transformation_epsilon_;
- double c_delta = ratio*std::abs(previous_transformation_(k,l) - transformation_(k,l));
- if(c_delta > delta)
+ ratio = 1. / transformation_epsilon_;
+ double c_delta =
+ ratio * std::abs(previous_transformation_(k, l) - transformation_(k, l));
+ if (c_delta > delta)
delta = c_delta;
}
}
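+        // delta is now the largest element-wise change of the transform, scaled by
+        // 1/epsilon, so the convergence test below (delta < 1) means every entry
+        // changed by less than its respective epsilon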
- }
- catch (PCLException &e)
- {
- PCL_DEBUG ("[pcl::%s::computeTransformation] Optimization issue %s\n", getClassName ().c_str (), e.what ());
+ } catch (PCLException& e) {
+ PCL_DEBUG("[pcl::%s::computeTransformation] Optimization issue %s\n",
+ getClassName().c_str(),
+ e.what());
break;
}
nr_iterations_++;
// Check for convergence
- if (nr_iterations_ >= max_iterations_ || delta < 1)
- {
+ if (nr_iterations_ >= max_iterations_ || delta < 1) {
converged_ = true;
- PCL_DEBUG ("[pcl::%s::computeTransformation] Convergence reached. Number of iterations: %d out of %d. Transformation difference: %f\n",
- getClassName ().c_str (), nr_iterations_, max_iterations_, (transformation_ - previous_transformation_).array ().abs ().sum ());
+ PCL_DEBUG("[pcl::%s::computeTransformation] Convergence reached. Number of "
+ "iterations: %d out of %d. Transformation difference: %f\n",
+ getClassName().c_str(),
+ nr_iterations_,
+ max_iterations_,
+ (transformation_ - previous_transformation_).array().abs().sum());
previous_transformation_ = transformation_;
}
else
- PCL_DEBUG ("[pcl::%s::computeTransformation] Convergence failed\n", getClassName ().c_str ());
+ PCL_DEBUG("[pcl::%s::computeTransformation] Convergence failed\n",
+ getClassName().c_str());
}
final_transformation_ = previous_transformation_ * guess;
// Transform the point cloud
- pcl::transformPointCloud (*input_, output, final_transformation_);
+ pcl::transformPointCloud(*input_, output, final_transformation_);
}
-
-template <typename PointSource, typename PointTarget> void
-GeneralizedIterativeClosestPoint<PointSource, PointTarget>::applyState(Eigen::Matrix4f &t, const Vector6d& x) const
+template <typename PointSource, typename PointTarget>
+void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget>::applyState(
+ Eigen::Matrix4f& t, const Vector6d& x) const
{
// Z Y X euler angles convention
Eigen::Matrix3f R;
- R = Eigen::AngleAxisf (static_cast<float> (x[5]), Eigen::Vector3f::UnitZ ())
- * Eigen::AngleAxisf (static_cast<float> (x[4]), Eigen::Vector3f::UnitY ())
- * Eigen::AngleAxisf (static_cast<float> (x[3]), Eigen::Vector3f::UnitX ());
- t.topLeftCorner<3,3> ().matrix () = R * t.topLeftCorner<3,3> ().matrix ();
- Eigen::Vector4f T (static_cast<float> (x[0]), static_cast<float> (x[1]), static_cast<float> (x[2]), 0.0f);
- t.col (3) += T;
+ R = Eigen::AngleAxisf(static_cast<float>(x[5]), Eigen::Vector3f::UnitZ()) *
+ Eigen::AngleAxisf(static_cast<float>(x[4]), Eigen::Vector3f::UnitY()) *
+ Eigen::AngleAxisf(static_cast<float>(x[3]), Eigen::Vector3f::UnitX());
+ t.topLeftCorner<3, 3>().matrix() = R * t.topLeftCorner<3, 3>().matrix();
+ Eigen::Vector4f T(static_cast<float>(x[0]),
+ static_cast<float>(x[1]),
+ static_cast<float>(x[2]),
+ 0.0f);
+ t.col(3) += T;
}
} // namespace pcl
-#endif //PCL_REGISTRATION_IMPL_GICP_HPP_
-
+#endif // PCL_REGISTRATION_IMPL_GICP_HPP_
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#ifndef PCL_REGISTRATION_IMPL_IA_FPCS_H_
#define PCL_REGISTRATION_IMPL_IA_FPCS_H_
-#include <pcl/registration/ia_fpcs.h>
-#include <pcl/common/time.h>
#include <pcl/common/distances.h>
+#include <pcl/common/time.h>
#include <pcl/common/utils.h>
-#include <pcl/sample_consensus/sac_model_plane.h>
+#include <pcl/registration/ia_fpcs.h>
#include <pcl/registration/transformation_estimation_3point.h>
+#include <pcl/sample_consensus/sac_model_plane.h>
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> inline float
-pcl::getMeanPointDensity (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, float max_dist, int nr_threads)
+template <typename PointT>
+inline float
+pcl::getMeanPointDensity(const typename pcl::PointCloud<PointT>::ConstPtr& cloud,
+ float max_dist,
+ int nr_threads)
{
const float max_dist_sqr = max_dist * max_dist;
- const std::size_t s = cloud.size ();
+  const std::size_t s = cloud->size();
- pcl::search::KdTree <PointT> tree;
- tree.setInputCloud (cloud);
+ pcl::search::KdTree<PointT> tree;
+ tree.setInputCloud(cloud);
float mean_dist = 0.f;
int num = 0;
- std::vector <int> ids (2);
- std::vector <float> dists_sqr (2);
+ pcl::Indices ids(2);
+ std::vector<float> dists_sqr(2);
pcl::utils::ignore(nr_threads);
#pragma omp parallel for \
reduction(+:mean_dist, num) \
firstprivate(s, max_dist_sqr) \
num_threads(nr_threads)
- for (int i = 0; i < 1000; i++)
- {
- tree.nearestKSearch ((*cloud)[rand () % s], 2, ids, dists_sqr);
- if (dists_sqr[1] < max_dist_sqr)
- {
- mean_dist += std::sqrt (dists_sqr[1]);
+ for (int i = 0; i < 1000; i++) {
+ tree.nearestKSearch((*cloud)[rand() % s], 2, ids, dists_sqr);
+ if (dists_sqr[1] < max_dist_sqr) {
+ mean_dist += std::sqrt(dists_sqr[1]);
num++;
}
}
return (mean_dist / num);
};
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> inline float
-pcl::getMeanPointDensity (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, const std::vector <int> &indices,
- float max_dist, int nr_threads)
+template <typename PointT>
+inline float
+pcl::getMeanPointDensity(const typename pcl::PointCloud<PointT>::ConstPtr& cloud,
+ const pcl::Indices& indices,
+ float max_dist,
+ int nr_threads)
{
const float max_dist_sqr = max_dist * max_dist;
- const std::size_t s = indices.size ();
+ const std::size_t s = indices.size();
- pcl::search::KdTree <PointT> tree;
- tree.setInputCloud (cloud);
+ pcl::search::KdTree<PointT> tree;
+ tree.setInputCloud(cloud);
float mean_dist = 0.f;
int num = 0;
- std::vector <int> ids (2);
- std::vector <float> dists_sqr (2);
+ pcl::Indices ids(2);
+ std::vector<float> dists_sqr(2);
pcl::utils::ignore(nr_threads);
#if OPENMP_LEGACY_CONST_DATA_SHARING_RULE
reduction(+:mean_dist, num) \
num_threads(nr_threads)
#endif
- for (int i = 0; i < 1000; i++)
- {
- tree.nearestKSearch ((*cloud)[indices[rand () % s]], 2, ids, dists_sqr);
- if (dists_sqr[1] < max_dist_sqr)
- {
- mean_dist += std::sqrt (dists_sqr[1]);
+ for (int i = 0; i < 1000; i++) {
+ tree.nearestKSearch((*cloud)[indices[rand() % s]], 2, ids, dists_sqr);
+ if (dists_sqr[1] < max_dist_sqr) {
+ mean_dist += std::sqrt(dists_sqr[1]);
num++;
}
}
return (mean_dist / num);
};
-
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::FPCSInitialAlignment () :
- source_normals_ (),
- target_normals_ (),
- nr_threads_ (1),
- approx_overlap_ (0.5f),
- delta_ (1.f),
- score_threshold_ (FLT_MAX),
- nr_samples_ (0),
- max_norm_diff_ (90.f),
- max_runtime_ (0),
- fitness_score_ (FLT_MAX),
- diameter_ (),
- max_base_diameter_sqr_ (),
- use_normals_ (false),
- normalize_delta_ (true),
- max_pair_diff_ (),
- max_edge_diff_ (),
- coincidation_limit_ (),
- max_mse_ (),
- max_inlier_dist_sqr_ (),
- small_error_ (0.00001f)
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ FPCSInitialAlignment()
+: source_normals_()
+, target_normals_()
+, nr_threads_(1)
+, approx_overlap_(0.5f)
+, delta_(1.f)
+, score_threshold_(FLT_MAX)
+, nr_samples_(0)
+, max_norm_diff_(90.f)
+, max_runtime_(0)
+, fitness_score_(FLT_MAX)
+, diameter_()
+, max_base_diameter_sqr_()
+, use_normals_(false)
+, normalize_delta_(true)
+, max_pair_diff_()
+, max_edge_diff_()
+, coincidation_limit_()
+, max_mse_()
+, max_inlier_dist_sqr_()
+, small_error_(0.00001f)
{
reg_name_ = "pcl::registration::FPCSInitialAlignment";
max_iterations_ = 0;
ransac_iterations_ = 1000;
- transformation_estimation_.reset (new pcl::registration::TransformationEstimation3Point <PointSource, PointTarget>);
+ transformation_estimation_.reset(
+ new pcl::registration::TransformationEstimation3Point<PointSource, PointTarget>);
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::computeTransformation (
- PointCloudSource &output,
- const Eigen::Matrix4f &guess)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ computeTransformation(PointCloudSource& output, const Eigen::Matrix4f& guess)
{
- if (!initCompute ())
+ if (!initCompute())
return;
final_transformation_ = guess;
bool abort = false;
- std::vector <MatchingCandidates> all_candidates (max_iterations_);
+ std::vector<MatchingCandidates> all_candidates(max_iterations_);
pcl::StopWatch timer;
- #pragma omp parallel \
- default(none) \
- shared(abort, all_candidates, timer) \
+#pragma omp parallel default(none) shared(abort, all_candidates, timer) \
num_threads(nr_threads_)
{
- #ifdef _OPENMP
- std::srand (static_cast <unsigned int> (std::time (NULL)) ^ omp_get_thread_num ());
- #pragma omp for schedule(dynamic)
- #endif
- for (int i = 0; i < max_iterations_; i++)
- {
- #pragma omp flush (abort)
-
- MatchingCandidates candidates (1);
- std::vector <int> base_indices (4);
+#ifdef _OPENMP
+ const unsigned int seed =
+ static_cast<unsigned int>(std::time(NULL)) ^ omp_get_thread_num();
+ std::srand(seed);
+ PCL_DEBUG("[%s::computeTransformation] Using seed=%u\n", reg_name_.c_str(), seed);
+#pragma omp for schedule(dynamic)
+#endif
+ for (int i = 0; i < max_iterations_; i++) {
+#pragma omp flush(abort)
+
+ MatchingCandidates candidates(1);
+ pcl::Indices base_indices(4);
all_candidates[i] = candidates;
- if (!abort)
- {
+ if (!abort) {
float ratio[2];
// select four coplanar point base
- if (selectBase (base_indices, ratio) == 0)
- {
+ if (selectBase(base_indices, ratio) == 0) {
// calculate candidate pair correspondences using diagonal lengths of base
pcl::Correspondences pairs_a, pairs_b;
- if (bruteForceCorrespondences (base_indices[0], base_indices[1], pairs_a) == 0 &&
- bruteForceCorrespondences (base_indices[2], base_indices[3], pairs_b) == 0)
- {
- // determine candidate matches by combining pair correspondences based on segment distances
- std::vector <std::vector <int> > matches;
- if (determineBaseMatches (base_indices, matches, pairs_a, pairs_b, ratio) == 0)
- {
+ if (bruteForceCorrespondences(base_indices[0], base_indices[1], pairs_a) ==
+ 0 &&
+ bruteForceCorrespondences(base_indices[2], base_indices[3], pairs_b) ==
+ 0) {
+ // determine candidate matches by combining pair correspondences based on
+ // segment distances
+ std::vector<pcl::Indices> matches;
+ if (determineBaseMatches(base_indices, matches, pairs_a, pairs_b, ratio) ==
+ 0) {
// check and evaluate candidate matches and store them
- handleMatches (base_indices, matches, candidates);
- if (!candidates.empty ())
+ handleMatches(base_indices, matches, candidates);
+ if (!candidates.empty())
all_candidates[i] = candidates;
}
}
}
// check terminate early (time or fitness_score threshold reached)
- abort = (!candidates.empty () ? candidates[0].fitness_score < score_threshold_ : abort);
- abort = (abort ? abort : timer.getTimeSeconds () > max_runtime_);
+ abort = (!candidates.empty() ? candidates[0].fitness_score < score_threshold_
+ : abort);
+ abort = (abort ? abort : timer.getTimeSeconds() > max_runtime_);
-
- #pragma omp flush (abort)
+#pragma omp flush(abort)
}
}
}
-
// determine best match over all tries
- finalCompute (all_candidates);
+ finalCompute(all_candidates);
// apply the final transformation
- pcl::transformPointCloud (*input_, output, final_transformation_);
+ pcl::transformPointCloud(*input_, output, final_transformation_);
- deinitCompute ();
+ deinitCompute();
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> bool
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::initCompute ()
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+bool
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ initCompute()
{
- std::srand (static_cast <unsigned int> (std::time (nullptr)));
+ const unsigned int seed = std::time(nullptr);
+ std::srand(seed);
+ PCL_DEBUG("[%s::initCompute] Using seed=%u\n", reg_name_.c_str(), seed);
// basic pcl initialization
- if (!pcl::PCLBase <PointSource>::initCompute ())
+ if (!pcl::PCLBase<PointSource>::initCompute())
return (false);
// check if source and target are given
- if (!input_ || !target_)
- {
- PCL_ERROR ("[%s::initCompute] Source or target dataset not given!\n", reg_name_.c_str ());
+ if (!input_ || !target_) {
+ PCL_ERROR("[%s::initCompute] Source or target dataset not given!\n",
+ reg_name_.c_str());
return (false);
}
- if (!target_indices_ || target_indices_->empty ())
- {
- target_indices_.reset (new std::vector <int> (static_cast <int> (target_->size ())));
+ if (!target_indices_ || target_indices_->empty()) {
+ target_indices_.reset(new pcl::Indices(target_->size()));
int index = 0;
- for (int &target_index : *target_indices_)
+ for (auto& target_index : *target_indices_)
target_index = index++;
target_cloud_updated_ = true;
}
- // if a sample size for the point clouds is given; prefarably no sampling of target cloud
- if (nr_samples_ != 0)
- {
- const int ss = static_cast <int> (indices_->size ());
- const int sample_fraction_src = std::max (1, static_cast <int> (ss / nr_samples_));
+  // if a sample size for the point clouds is given; preferably no sampling of target
+  // cloud
+ if (nr_samples_ != 0) {
+ const int ss = static_cast<int>(indices_->size());
+ const int sample_fraction_src = std::max(1, static_cast<int>(ss / nr_samples_));
- source_indices_ = pcl::IndicesPtr (new std::vector <int>);
+ source_indices_ = pcl::IndicesPtr(new pcl::Indices);
for (int i = 0; i < ss; i++)
- if (rand () % sample_fraction_src == 0)
- source_indices_->push_back ((*indices_) [i]);
+ if (rand() % sample_fraction_src == 0)
+ source_indices_->push_back((*indices_)[i]);
}
else
source_indices_ = indices_;
// check usage of normals
- if (source_normals_ && target_normals_ && source_normals_->size () == input_->size () && target_normals_->size () == target_->size ())
+ if (source_normals_ && target_normals_ && source_normals_->size() == input_->size() &&
+ target_normals_->size() == target_->size())
use_normals_ = true;
// set up tree structures
- if (target_cloud_updated_)
- {
- tree_->setInputCloud (target_, target_indices_);
+ if (target_cloud_updated_) {
+ tree_->setInputCloud(target_, target_indices_);
target_cloud_updated_ = false;
}
// get diameter of input cloud (distance between farthest points)
Eigen::Vector4f pt_min, pt_max;
- pcl::getMinMax3D (*target_, *target_indices_, pt_min, pt_max);
- diameter_ = (pt_max - pt_min).norm ();
+ pcl::getMinMax3D(*target_, *target_indices_, pt_min, pt_max);
+ diameter_ = (pt_max - pt_min).norm();
// derive the limits for the random base selection
- float max_base_diameter = diameter_* approx_overlap_ * 2.f;
+ float max_base_diameter = diameter_ * approx_overlap_ * 2.f;
max_base_diameter_sqr_ = max_base_diameter * max_base_diameter;
// normalize the delta
- if (normalize_delta_)
- {
- float mean_dist = getMeanPointDensity <PointTarget> (target_, *target_indices_, 0.05f * diameter_, nr_threads_);
+ if (normalize_delta_) {
+ float mean_dist = getMeanPointDensity<PointTarget>(
+ target_, *target_indices_, 0.05f * diameter_, nr_threads_);
delta_ *= mean_dist;
}
- // heuristic determination of number of trials to have high probability of finding a good solution
- if (max_iterations_ == 0)
- {
- float first_est = std::log (small_error_) / std::log (1.0 - std::pow ((double) approx_overlap_, (double) min_iterations));
- max_iterations_ = static_cast <int> (first_est / (diameter_fraction * approx_overlap_ * 2.f));
+ // heuristic determination of number of trials to have high probability of finding a
+ // good solution
+ if (max_iterations_ == 0) {
+ float first_est =
+ std::log(small_error_) /
+ std::log(1.0 - std::pow((double)approx_overlap_, (double)min_iterations));
+ max_iterations_ =
+ static_cast<int>(first_est / (diameter_fraction * approx_overlap_ * 2.f));
}
// set further parameter
max_pair_diff_ = delta_ * 2.f;
max_edge_diff_ = delta_ * 4.f;
coincidation_limit_ = delta_ * 2.f; // EDITED: originally std::sqrt (delta_ * 2.f)
- max_mse_ = powf (delta_* 2.f, 2.f);
- max_inlier_dist_sqr_ = powf (delta_ * 2.f, 2.f);
+ max_mse_ = powf(delta_ * 2.f, 2.f);
+ max_inlier_dist_sqr_ = powf(delta_ * 2.f, 2.f);
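+  // all of the rejection thresholds above scale with delta_, i.e. with the (possibly
+  // density-normalized) point accuracy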
// reset fitness_score
fitness_score_ = FLT_MAX;
return (true);
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::selectBase (
- std::vector <int> &base_indices,
- float (&ratio)[2])
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+int
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ selectBase(pcl::Indices& base_indices, float (&ratio)[2])
{
- const float too_close_sqr = max_base_diameter_sqr_*0.01;
+ const float too_close_sqr = max_base_diameter_sqr_ * 0.01;
- Eigen::VectorXf coefficients (4);
- pcl::SampleConsensusModelPlane <PointTarget> plane (target_);
- plane.setIndices (target_indices_);
+ Eigen::VectorXf coefficients(4);
+ pcl::SampleConsensusModelPlane<PointTarget> plane(target_);
+ plane.setIndices(target_indices_);
Eigen::Vector4f centre_pt;
float nearest_to_plane = FLT_MAX;
- // repeat base search until valid quadruple was found or ransac_iterations_ number of tries were unsuccessful
- for (int i = 0; i < ransac_iterations_; i++)
- {
+  // repeat base search until a valid quadruple is found or ransac_iterations_ tries
+  // have been unsuccessful
+ for (int i = 0; i < ransac_iterations_; i++) {
    // randomly select an appropriate point triple
- if (selectBaseTriangle (base_indices) < 0)
+ if (selectBaseTriangle(base_indices) < 0)
continue;
- std::vector <int> base_triple (base_indices.begin (), base_indices.end () - 1);
- plane.computeModelCoefficients (base_triple, coefficients);
- pcl::compute3DCentroid (*target_, base_triple, centre_pt);
+ pcl::Indices base_triple(base_indices.begin(), base_indices.end() - 1);
+ plane.computeModelCoefficients(base_triple, coefficients);
+ pcl::compute3DCentroid(*target_, base_triple, centre_pt);
// loop over all points in source cloud to find most suitable fourth point
- const PointTarget *pt1 = &((*target_)[base_indices[0]]);
- const PointTarget *pt2 = &((*target_)[base_indices[1]]);
- const PointTarget *pt3 = &((*target_)[base_indices[2]]);
-
- for (const int &target_index : *target_indices_)
- {
- const PointTarget *pt4 = &((*target_)[target_index]);
-
- float d1 = pcl::squaredEuclideanDistance (*pt4, *pt1);
- float d2 = pcl::squaredEuclideanDistance (*pt4, *pt2);
- float d3 = pcl::squaredEuclideanDistance (*pt4, *pt3);
- float d4 = (pt4->getVector3fMap () - centre_pt.head (3)).squaredNorm ();
-
- // check distance between points w.r.t minimum sampling distance; EDITED -> 4th point now also limited by max base line
- if (d1 < too_close_sqr || d2 < too_close_sqr || d3 < too_close_sqr || d4 < too_close_sqr ||
- d1 > max_base_diameter_sqr_ || d2 > max_base_diameter_sqr_ || d3 > max_base_diameter_sqr_)
+ const PointTarget* pt1 = &((*target_)[base_indices[0]]);
+ const PointTarget* pt2 = &((*target_)[base_indices[1]]);
+ const PointTarget* pt3 = &((*target_)[base_indices[2]]);
+
+ for (const auto& target_index : *target_indices_) {
+ const PointTarget* pt4 = &((*target_)[target_index]);
+
+ float d1 = pcl::squaredEuclideanDistance(*pt4, *pt1);
+ float d2 = pcl::squaredEuclideanDistance(*pt4, *pt2);
+ float d3 = pcl::squaredEuclideanDistance(*pt4, *pt3);
+ float d4 = (pt4->getVector3fMap() - centre_pt.head(3)).squaredNorm();
+
+ // check distance between points w.r.t minimum sampling distance; EDITED -> 4th
+ // point now also limited by max base line
+ if (d1 < too_close_sqr || d2 < too_close_sqr || d3 < too_close_sqr ||
+ d4 < too_close_sqr || d1 > max_base_diameter_sqr_ ||
+ d2 > max_base_diameter_sqr_ || d3 > max_base_diameter_sqr_)
continue;
// check distance to plane to get point closest to plane
- float dist_to_plane = pcl::pointToPlaneDistance (*pt4, coefficients);
- if (dist_to_plane < nearest_to_plane)
- {
+ float dist_to_plane = pcl::pointToPlaneDistance(*pt4, coefficients);
+ if (dist_to_plane < nearest_to_plane) {
base_indices[3] = target_index;
nearest_to_plane = dist_to_plane;
}
}
// check if at least one point fulfilled the conditions
- if (nearest_to_plane != FLT_MAX)
- {
- // order points to build largest quadrangle and calcuate intersection ratios of diagonals
- setupBase (base_indices, ratio);
+ if (nearest_to_plane != FLT_MAX) {
+      // order points to build largest quadrangle and calculate intersection ratios of
+      // diagonals
+ setupBase(base_indices, ratio);
return (0);
}
}
return (-1);
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::selectBaseTriangle (std::vector <int> &base_indices)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+int
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ selectBaseTriangle(pcl::Indices& base_indices)
{
- int nr_points = static_cast <int> (target_indices_->size ());
+ const auto nr_points = target_indices_->size();
float best_t = 0.f;
// choose random first point
- base_indices[0] = (*target_indices_)[rand () % nr_points];
- int *index1 = &base_indices[0];
+ base_indices[0] = (*target_indices_)[rand() % nr_points];
+ auto* index1 = &base_indices[0];
// random search for 2 other points (as far away as overlap allows)
- for (int i = 0; i < ransac_iterations_; i++)
- {
- int *index2 = &(*target_indices_)[rand () % nr_points];
- int *index3 = &(*target_indices_)[rand () % nr_points];
+ for (int i = 0; i < ransac_iterations_; i++) {
+ auto* index2 = &(*target_indices_)[rand() % nr_points];
+ auto* index3 = &(*target_indices_)[rand() % nr_points];
- Eigen::Vector3f u = (*target_)[*index2].getVector3fMap () - (*target_)[*index1].getVector3fMap ();
- Eigen::Vector3f v = (*target_)[*index3].getVector3fMap () - (*target_)[*index1].getVector3fMap ();
- float t = u.cross (v).squaredNorm (); // triangle area (0.5 * sqrt(t)) should be maximal
+ Eigen::Vector3f u =
+ (*target_)[*index2].getVector3fMap() - (*target_)[*index1].getVector3fMap();
+ Eigen::Vector3f v =
+ (*target_)[*index3].getVector3fMap() - (*target_)[*index1].getVector3fMap();
+ float t =
+ u.cross(v).squaredNorm(); // triangle area (0.5 * sqrt(t)) should be maximal
// check for most suitable point triple
- if (t > best_t && u.squaredNorm () < max_base_diameter_sqr_ && v.squaredNorm () < max_base_diameter_sqr_)
- {
+ if (t > best_t && u.squaredNorm() < max_base_diameter_sqr_ &&
+ v.squaredNorm() < max_base_diameter_sqr_) {
best_t = t;
base_indices[1] = *index2;
      base_indices[2] = *index3;
    }
  }
return (best_t == 0.f ? -1 : 0);
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::setupBase (
- std::vector <int> &base_indices,
- float (&ratio)[2])
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ setupBase(pcl::Indices& base_indices, float (&ratio)[2])
{
float best_t = FLT_MAX;
- const std::vector <int> copy (base_indices.begin (), base_indices.end ());
- std::vector <int> temp (base_indices.begin (), base_indices.end ());
+ const pcl::Indices copy(base_indices.begin(), base_indices.end());
+ pcl::Indices temp(base_indices.begin(), base_indices.end());
// loop over all combinations of base points
- for (std::vector <int>::const_iterator i = copy.begin (), i_e = copy.end (); i != i_e; ++i)
- for (std::vector <int>::const_iterator j = copy.begin (), j_e = copy.end (); j != j_e; ++j)
- {
- if (i == j)
- continue;
-
- for (std::vector <int>::const_iterator k = copy.begin (), k_e = copy.end (); k != k_e; ++k)
- {
- if (k == j || k == i)
+ for (auto i = copy.begin(), i_e = copy.end(); i != i_e; ++i)
+ for (auto j = copy.begin(), j_e = copy.end(); j != j_e; ++j) {
+ if (i == j)
continue;
- std::vector <int>::const_iterator l = copy.begin ();
- while (l == i || l == j || l == k)
- ++l;
-
- temp[0] = *i;
- temp[1] = *j;
- temp[2] = *k;
- temp[3] = *l;
-
- // calculate diagonal intersection ratios and check for suitable segment to segment distances
- float ratio_temp[2];
- float t = segmentToSegmentDist (temp, ratio_temp);
- if (t < best_t)
- {
- best_t = t;
- ratio[0] = ratio_temp[0];
- ratio[1] = ratio_temp[1];
- base_indices = temp;
+ for (auto k = copy.begin(), k_e = copy.end(); k != k_e; ++k) {
+ if (k == j || k == i)
+ continue;
+
+ auto l = copy.begin();
+ while (l == i || l == j || l == k)
+ ++l;
+
+ temp[0] = *i;
+ temp[1] = *j;
+ temp[2] = *k;
+ temp[3] = *l;
+
+ // calculate diagonal intersection ratios and check for suitable segment to
+ // segment distances
+ float ratio_temp[2];
+ float t = segmentToSegmentDist(temp, ratio_temp);
+ if (t < best_t) {
+ best_t = t;
+ ratio[0] = ratio_temp[0];
+ ratio[1] = ratio_temp[1];
+ base_indices = temp;
+ }
}
}
- }
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> float
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::segmentToSegmentDist (
- const std::vector <int> &base_indices,
- float (&ratio)[2])
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+float
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ segmentToSegmentDist(const pcl::Indices& base_indices, float (&ratio)[2])
{
// get point vectors
- Eigen::Vector3f u = (*target_)[base_indices[1]].getVector3fMap () - (*target_)[base_indices[0]].getVector3fMap ();
- Eigen::Vector3f v = (*target_)[base_indices[3]].getVector3fMap () - (*target_)[base_indices[2]].getVector3fMap ();
- Eigen::Vector3f w = (*target_)[base_indices[0]].getVector3fMap () - (*target_)[base_indices[2]].getVector3fMap ();
+ Eigen::Vector3f u = (*target_)[base_indices[1]].getVector3fMap() -
+ (*target_)[base_indices[0]].getVector3fMap();
+ Eigen::Vector3f v = (*target_)[base_indices[3]].getVector3fMap() -
+ (*target_)[base_indices[2]].getVector3fMap();
+ Eigen::Vector3f w = (*target_)[base_indices[0]].getVector3fMap() -
+ (*target_)[base_indices[2]].getVector3fMap();
// calculate segment distances
- float a = u.dot (u);
- float b = u.dot (v);
- float c = v.dot (v);
- float d = u.dot (w);
- float e = v.dot (w);
+ float a = u.dot(u);
+ float b = u.dot(v);
+ float c = v.dot(v);
+ float d = u.dot(w);
+ float e = v.dot(w);
float D = a * c - b * b;
float sN = 0.f, sD = D;
float tN = 0.f, tD = D;
// check segments
- if (D < small_error_)
- {
+ if (D < small_error_) {
sN = 0.f;
sD = 1.f;
tN = e;
tD = c;
}
- else
- {
+ else {
sN = (b * e - c * d);
tN = (a * e - b * d);
- if (sN < 0.f)
- {
+ if (sN < 0.f) {
sN = 0.f;
tN = e;
tD = c;
}
- else if (sN > sD)
- {
+ else if (sN > sD) {
sN = sD;
tN = e + b;
tD = c;
}
}
- if (tN < 0.f)
- {
+ if (tN < 0.f) {
tN = 0.f;
    if (-d < 0.f)
      sN = 0.f;
else if (-d > a)
sN = sD;
- else
- {
+ else {
sN = -d;
sD = a;
}
}
- else if (tN > tD)
- {
+ else if (tN > tD) {
tN = tD;
    if ((-d + b) < 0.f)
      sN = 0.f;
else if ((-d + b) > a)
sN = sD;
- else
- {
+ else {
sN = (-d + b);
sD = a;
}
}
// set intersection ratios
- ratio[0] = (std::abs (sN) < small_error_) ? 0.f : sN / sD;
- ratio[1] = (std::abs (tN) < small_error_) ? 0.f : tN / tD;
+ ratio[0] = (std::abs(sN) < small_error_) ? 0.f : sN / sD;
+ ratio[1] = (std::abs(tN) < small_error_) ? 0.f : tN / tD;
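+  // x is the offset between the closest points on the two segments; its norm is the
+  // segment-to-segment distance returned below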
Eigen::Vector3f x = w + (ratio[0] * u) - (ratio[1] * v);
- return (x.norm ());
+ return (x.norm());
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::bruteForceCorrespondences (
- int idx1,
- int idx2,
- pcl::Correspondences &pairs)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+int
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ bruteForceCorrespondences(int idx1, int idx2, pcl::Correspondences& pairs)
{
const float max_norm_diff = 0.5f * max_norm_diff_ * M_PI / 180.f;
// calculate reference segment distance and normal angle
- float ref_dist = pcl::euclideanDistance ((*target_)[idx1], (*target_)[idx2]);
- float ref_norm_angle = (use_normals_ ? ((*target_normals_)[idx1].getNormalVector3fMap () -
- (*target_normals_)[idx2].getNormalVector3fMap ()).norm () : 0.f);
+ float ref_dist = pcl::euclideanDistance((*target_)[idx1], (*target_)[idx2]);
+ float ref_norm_angle =
+ (use_normals_ ? ((*target_normals_)[idx1].getNormalVector3fMap() -
+ (*target_normals_)[idx2].getNormalVector3fMap())
+ .norm()
+ : 0.f);
// loop over all pairs of points in source point cloud
- auto it_out = source_indices_->begin (), it_out_e = source_indices_->end () - 1;
- auto it_in_e = source_indices_->end ();
- for ( ; it_out != it_out_e; it_out++)
- {
+ auto it_out = source_indices_->begin(), it_out_e = source_indices_->end() - 1;
+ auto it_in_e = source_indices_->end();
+ for (; it_out != it_out_e; it_out++) {
auto it_in = it_out + 1;
- const PointSource *pt1 = &(*input_)[*it_out];
- for ( ; it_in != it_in_e; it_in++)
- {
- const PointSource *pt2 = &(*input_)[*it_in];
+ const PointSource* pt1 = &(*input_)[*it_out];
+ for (; it_in != it_in_e; it_in++) {
+ const PointSource* pt2 = &(*input_)[*it_in];
// check point distance compared to reference dist (from base)
- float dist = pcl::euclideanDistance (*pt1, *pt2);
- if (std::abs(dist - ref_dist) < max_pair_diff_)
- {
+ float dist = pcl::euclideanDistance(*pt1, *pt2);
+ if (std::abs(dist - ref_dist) < max_pair_diff_) {
// add here normal evaluation if normals are given
- if (use_normals_)
- {
- const NormalT *pt1_n = &((*source_normals_)[*it_out]);
- const NormalT *pt2_n = &((*source_normals_)[*it_in]);
+ if (use_normals_) {
+ const NormalT* pt1_n = &((*source_normals_)[*it_out]);
+ const NormalT* pt2_n = &((*source_normals_)[*it_in]);
- float norm_angle_1 = (pt1_n->getNormalVector3fMap () - pt2_n->getNormalVector3fMap ()).norm ();
- float norm_angle_2 = (pt1_n->getNormalVector3fMap () + pt2_n->getNormalVector3fMap ()).norm ();
+ float norm_angle_1 =
+ (pt1_n->getNormalVector3fMap() - pt2_n->getNormalVector3fMap()).norm();
+ float norm_angle_2 =
+ (pt1_n->getNormalVector3fMap() + pt2_n->getNormalVector3fMap()).norm();
- float norm_diff = std::min <float> (std::abs (norm_angle_1 - ref_norm_angle), std::abs (norm_angle_2 - ref_norm_angle));
+ float norm_diff = std::min<float>(std::abs(norm_angle_1 - ref_norm_angle),
+ std::abs(norm_angle_2 - ref_norm_angle));
if (norm_diff > max_norm_diff)
continue;
}
- pairs.push_back (pcl::Correspondence (*it_in, *it_out, dist));
- pairs.push_back (pcl::Correspondence (*it_out, *it_in, dist));
+ pairs.push_back(pcl::Correspondence(*it_in, *it_out, dist));
+ pairs.push_back(pcl::Correspondence(*it_out, *it_in, dist));
}
}
}
// return success if at least one correspondence was found
- return (pairs.empty () ? -1 : 0);
+ return (pairs.empty() ? -1 : 0);
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::determineBaseMatches (
- const std::vector <int> &base_indices,
- std::vector <std::vector <int> > &matches,
- const pcl::Correspondences &pairs_a,
- const pcl::Correspondences &pairs_b,
- const float (&ratio)[2])
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+int
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ determineBaseMatches(const pcl::Indices& base_indices,
+ std::vector<pcl::Indices>& matches,
+ const pcl::Correspondences& pairs_a,
+ const pcl::Correspondences& pairs_b,
+ const float (&ratio)[2])
{
// calculate edge lengths of base
float dist_base[4];
- dist_base[0] = pcl::euclideanDistance ((*target_)[base_indices[0]], (*target_)[base_indices[2]]);
- dist_base[1] = pcl::euclideanDistance ((*target_)[base_indices[0]], (*target_)[base_indices[3]]);
- dist_base[2] = pcl::euclideanDistance ((*target_)[base_indices[1]], (*target_)[base_indices[2]]);
- dist_base[3] = pcl::euclideanDistance ((*target_)[base_indices[1]], (*target_)[base_indices[3]]);
-
- // loop over first point pair correspondences and store intermediate points 'e' in new point cloud
- PointCloudSourcePtr cloud_e (new PointCloudSource);
- cloud_e->resize (pairs_a.size () * 2);
- PointCloudSourceIterator it_pt = cloud_e->begin ();
- for (const auto &pair : pairs_a)
- {
- const PointSource *pt1 = &((*input_)[pair.index_match]);
- const PointSource *pt2 = &((*input_)[pair.index_query]);
+ dist_base[0] =
+ pcl::euclideanDistance((*target_)[base_indices[0]], (*target_)[base_indices[2]]);
+ dist_base[1] =
+ pcl::euclideanDistance((*target_)[base_indices[0]], (*target_)[base_indices[3]]);
+ dist_base[2] =
+ pcl::euclideanDistance((*target_)[base_indices[1]], (*target_)[base_indices[2]]);
+ dist_base[3] =
+ pcl::euclideanDistance((*target_)[base_indices[1]], (*target_)[base_indices[3]]);
+
+ // loop over first point pair correspondences and store intermediate points 'e' in new
+ // point cloud
+ PointCloudSourcePtr cloud_e(new PointCloudSource);
+ cloud_e->resize(pairs_a.size() * 2);
+ PointCloudSourceIterator it_pt = cloud_e->begin();
+ for (const auto& pair : pairs_a) {
+ const PointSource* pt1 = &((*input_)[pair.index_match]);
+ const PointSource* pt2 = &((*input_)[pair.index_query]);
// calculate intermediate points using both ratios from base (r1,r2)
- for (int i = 0; i < 2; i++, it_pt++)
- {
+ for (int i = 0; i < 2; i++, it_pt++) {
it_pt->x = pt1->x + ratio[i] * (pt2->x - pt1->x);
it_pt->y = pt1->y + ratio[i] * (pt2->y - pt1->y);
it_pt->z = pt1->z + ratio[i] * (pt2->z - pt1->z);
    }
  }
// initialize new kd tree of intermediate points from first point pair correspondences
- KdTreeReciprocalPtr tree_e (new KdTreeReciprocal);
- tree_e->setInputCloud (cloud_e);
+ KdTreeReciprocalPtr tree_e(new KdTreeReciprocal);
+ tree_e->setInputCloud(cloud_e);
- std::vector <int> ids;
- std::vector <float> dists_sqr;
+ pcl::Indices ids;
+ std::vector<float> dists_sqr;
// loop over second point pair correspondences
- for (const auto &pair : pairs_b)
- {
- const PointTarget *pt1 = &((*input_)[pair.index_match]);
- const PointTarget *pt2 = &((*input_)[pair.index_query]);
+ for (const auto& pair : pairs_b) {
+ const PointTarget* pt1 = &((*input_)[pair.index_match]);
+ const PointTarget* pt2 = &((*input_)[pair.index_query]);
// calculate intermediate points using both ratios from base (r1,r2)
- for (const float &r : ratio)
- {
+ for (const float& r : ratio) {
PointTarget pt_e;
pt_e.x = pt1->x + r * (pt2->x - pt1->x);
pt_e.y = pt1->y + r * (pt2->y - pt1->y);
pt_e.z = pt1->z + r * (pt2->z - pt1->z);
// search for corresponding intermediate points
- tree_e->radiusSearch (pt_e, coincidation_limit_, ids, dists_sqr);
- for (const int &id : ids)
- {
- std::vector <int> match_indices (4);
-
- match_indices[0] = pairs_a[static_cast <int> (std::floor ((float)(id/2.f)))].index_match;
- match_indices[1] = pairs_a[static_cast <int> (std::floor ((float)(id/2.f)))].index_query;
+ tree_e->radiusSearch(pt_e, coincidation_limit_, ids, dists_sqr);
+ for (const auto& id : ids) {
+ pcl::Indices match_indices(4);
+
+ match_indices[0] =
+ pairs_a[static_cast<int>(std::floor((float)(id / 2.f)))].index_match;
+ match_indices[1] =
+ pairs_a[static_cast<int>(std::floor((float)(id / 2.f)))].index_query;
match_indices[2] = pair.index_match;
match_indices[3] = pair.index_query;
        // EDITED: added coarse check of match based on edge length (due to rigid-body)
- if (checkBaseMatch (match_indices, dist_base) < 0)
+ if (checkBaseMatch(match_indices, dist_base) < 0)
continue;
- matches.push_back (match_indices);
+ matches.push_back(match_indices);
}
}
}
// return unsuccessful if no match was found
- return (!matches.empty () ? 0 : -1);
+ return (!matches.empty() ? 0 : -1);
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::checkBaseMatch (
- const std::vector <int> &match_indices,
- const float (&dist_ref)[4])
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+int
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ checkBaseMatch(const pcl::Indices& match_indices, const float (&dist_ref)[4])
{
- float d0 = pcl::euclideanDistance ((*input_)[match_indices[0]], (*input_)[match_indices[2]]);
- float d1 = pcl::euclideanDistance ((*input_)[match_indices[0]], (*input_)[match_indices[3]]);
- float d2 = pcl::euclideanDistance ((*input_)[match_indices[1]], (*input_)[match_indices[2]]);
- float d3 = pcl::euclideanDistance ((*input_)[match_indices[1]], (*input_)[match_indices[3]]);
+ float d0 =
+ pcl::euclideanDistance((*input_)[match_indices[0]], (*input_)[match_indices[2]]);
+ float d1 =
+ pcl::euclideanDistance((*input_)[match_indices[0]], (*input_)[match_indices[3]]);
+ float d2 =
+ pcl::euclideanDistance((*input_)[match_indices[1]], (*input_)[match_indices[2]]);
+ float d3 =
+ pcl::euclideanDistance((*input_)[match_indices[1]], (*input_)[match_indices[3]]);
// check edge distances of match w.r.t the base
- return (std::abs (d0 - dist_ref[0]) < max_edge_diff_ && std::abs (d1 - dist_ref[1]) < max_edge_diff_ &&
- std::abs (d2 - dist_ref[2]) < max_edge_diff_ && std::abs (d3 - dist_ref[3]) < max_edge_diff_) ? 0 : -1;
+ return (std::abs(d0 - dist_ref[0]) < max_edge_diff_ &&
+ std::abs(d1 - dist_ref[1]) < max_edge_diff_ &&
+ std::abs(d2 - dist_ref[2]) < max_edge_diff_ &&
+ std::abs(d3 - dist_ref[3]) < max_edge_diff_)
+ ? 0
+ : -1;
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::handleMatches (
- const std::vector <int> &base_indices,
- std::vector <std::vector <int> > &matches,
- MatchingCandidates &candidates)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ handleMatches(const pcl::Indices& base_indices,
+ std::vector<pcl::Indices>& matches,
+ MatchingCandidates& candidates)
{
- candidates.resize (1);
+ candidates.resize(1);
float fitness_score = FLT_MAX;
// loop over all Candidate matches
- for (auto &match : matches)
- {
+ for (auto& match : matches) {
Eigen::Matrix4f transformation_temp;
pcl::Correspondences correspondences_temp;
- // determine corresondences between base and match according to their distance to centroid
- linkMatchWithBase (base_indices, match, correspondences_temp);
+    // determine correspondences between base and match according to their distance to
+    // centroid
+ linkMatchWithBase(base_indices, match, correspondences_temp);
    // check match based on residuals of the corresponding points after transformation
- if (validateMatch (base_indices, match, correspondences_temp, transformation_temp) < 0)
+ if (validateMatch(base_indices, match, correspondences_temp, transformation_temp) <
+ 0)
continue;
- // check resulting using a sub sample of the source point cloud and compare to previous matches
- if (validateTransformation (transformation_temp, fitness_score) < 0)
+    // check resulting transformation using a sub sample of the source point cloud and
+    // compare to previous matches
+ if (validateTransformation(transformation_temp, fitness_score) < 0)
continue;
// store best match as well as associated fitness_score and transformation
candidates[0].fitness_score = fitness_score;
- candidates [0].transformation = transformation_temp;
- correspondences_temp.erase (correspondences_temp.end () - 1);
+ candidates[0].transformation = transformation_temp;
+ correspondences_temp.erase(correspondences_temp.end() - 1);
candidates[0].correspondences = correspondences_temp;
}
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::linkMatchWithBase (
- const std::vector <int> &base_indices,
- std::vector <int> &match_indices,
- pcl::Correspondences &correspondences)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ linkMatchWithBase(const pcl::Indices& base_indices,
+ pcl::Indices& match_indices,
+ pcl::Correspondences& correspondences)
{
// calculate centroid of base and target
- Eigen::Vector4f centre_base {0, 0, 0, 0}, centre_match {0, 0, 0, 0};
- pcl::compute3DCentroid (*target_, base_indices, centre_base);
- pcl::compute3DCentroid (*input_, match_indices, centre_match);
+ Eigen::Vector4f centre_base{0, 0, 0, 0}, centre_match{0, 0, 0, 0};
+ pcl::compute3DCentroid(*target_, base_indices, centre_base);
+ pcl::compute3DCentroid(*input_, match_indices, centre_match);
PointTarget centre_pt_base;
  centre_pt_base.x = centre_base[0];
  centre_pt_base.y = centre_base[1];
  centre_pt_base.z = centre_base[2];

  PointSource centre_pt_match;
  centre_pt_match.x = centre_match[0];
  centre_pt_match.y = centre_match[1];
  centre_pt_match.z = centre_match[2];
// find corresponding points according to their distance to the centroid
- std::vector <int> copy = match_indices;
-
- auto it_match_orig = match_indices.begin ();
- for (auto it_base = base_indices.cbegin (), it_base_e = base_indices.cend (); it_base != it_base_e; it_base++, it_match_orig++)
- {
- float dist_sqr_1 = pcl::squaredEuclideanDistance ((*target_)[*it_base], centre_pt_base);
+ pcl::Indices copy = match_indices;
+
+ auto it_match_orig = match_indices.begin();
+ for (auto it_base = base_indices.cbegin(), it_base_e = base_indices.cend();
+ it_base != it_base_e;
+ it_base++, it_match_orig++) {
+ float dist_sqr_1 =
+ pcl::squaredEuclideanDistance((*target_)[*it_base], centre_pt_base);
float best_diff_sqr = FLT_MAX;
int best_index = -1;
- for (const int &match_index : copy)
- {
+ for (const auto& match_index : copy) {
// calculate difference of distances to centre point
- float dist_sqr_2 = pcl::squaredEuclideanDistance ((*input_)[match_index], centre_pt_match);
+ float dist_sqr_2 =
+ pcl::squaredEuclideanDistance((*input_)[match_index], centre_pt_match);
float diff_sqr = std::abs(dist_sqr_1 - dist_sqr_2);
- if (diff_sqr < best_diff_sqr)
- {
+ if (diff_sqr < best_diff_sqr) {
best_diff_sqr = diff_sqr;
best_index = match_index;
}
}
// assign new correspondence and update indices of matched targets
- correspondences.push_back (pcl::Correspondence (best_index, *it_base, best_diff_sqr));
+ correspondences.push_back(pcl::Correspondence(best_index, *it_base, best_diff_sqr));
*it_match_orig = best_index;
}
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::validateMatch (
- const std::vector <int> &base_indices,
- const std::vector <int> &match_indices,
- const pcl::Correspondences &correspondences,
- Eigen::Matrix4f &transformation)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+int
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ validateMatch(const pcl::Indices& base_indices,
+ const pcl::Indices& match_indices,
+ const pcl::Correspondences& correspondences,
+ Eigen::Matrix4f& transformation)
{
  // only use triplet of points to simplify process (possible due to planar case)
pcl::Correspondences correspondences_temp = correspondences;
- correspondences_temp.erase (correspondences_temp.end () - 1);
+ correspondences_temp.erase(correspondences_temp.end() - 1);
// estimate transformation between correspondence set
- transformation_estimation_->estimateRigidTransformation (*input_, *target_, correspondences_temp, transformation);
+ transformation_estimation_->estimateRigidTransformation(
+ *input_, *target_, correspondences_temp, transformation);
// transform base points
PointCloudSource match_transformed;
- pcl::transformPointCloud (*input_, match_indices, match_transformed, transformation);
+ pcl::transformPointCloud(*input_, match_indices, match_transformed, transformation);
// calculate residuals of transformation and check against maximum threshold
- std::size_t nr_points = correspondences_temp.size ();
+ std::size_t nr_points = correspondences_temp.size();
float mse = 0.f;
for (std::size_t i = 0; i < nr_points; i++)
- mse += pcl::squaredEuclideanDistance (match_transformed.points [i], target_->points [base_indices[i]]);
+ mse += pcl::squaredEuclideanDistance(match_transformed.points[i],
+ target_->points[base_indices[i]]);
mse /= nr_points;
return (mse < max_mse_ ? 0 : -1);
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::validateTransformation (
- Eigen::Matrix4f &transformation,
- float &fitness_score)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+int
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ validateTransformation(Eigen::Matrix4f& transformation, float& fitness_score)
{
// transform source point cloud
PointCloudSource source_transformed;
- pcl::transformPointCloud (*input_, *source_indices_, source_transformed, transformation);
+ pcl::transformPointCloud(
+ *input_, *source_indices_, source_transformed, transformation);
- std::size_t nr_points = source_transformed.size ();
- std::size_t terminate_value = fitness_score > 1 ? 0 : static_cast <std::size_t> ((1.f - fitness_score) * nr_points);
+ std::size_t nr_points = source_transformed.size();
+ std::size_t terminate_value =
+ fitness_score > 1 ? 0
+ : static_cast<std::size_t>((1.f - fitness_score) * nr_points);
float inlier_score_temp = 0;
- std::vector <int> ids;
- std::vector <float> dists_sqr;
- PointCloudSourceIterator it = source_transformed.begin ();
+ pcl::Indices ids;
+ std::vector<float> dists_sqr;
+ PointCloudSourceIterator it = source_transformed.begin();
- for (std::size_t i = 0; i < nr_points; it++, i++)
- {
+ for (std::size_t i = 0; i < nr_points; it++, i++) {
// search for nearest point using kd tree search
- tree_->nearestKSearch (*it, 1, ids, dists_sqr);
+ tree_->nearestKSearch(*it, 1, ids, dists_sqr);
inlier_score_temp += (dists_sqr[0] < max_inlier_dist_sqr_ ? 1 : 0);
// early terminating
}
// check current costs and return unsuccessful if larger than previous ones
- inlier_score_temp /= static_cast <float> (nr_points);
+ inlier_score_temp /= static_cast<float>(nr_points);
float fitness_score_temp = 1.f - inlier_score_temp;
if (fitness_score_temp > fitness_score)
    return (-1);

  fitness_score = fitness_score_temp;
  return (0);
}
-
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::finalCompute (
- const std::vector <MatchingCandidates > &candidates)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ finalCompute(const std::vector<MatchingCandidates>& candidates)
{
// get best fitness_score over all tries
- int nr_candidates = static_cast <int> (candidates.size ());
+ int nr_candidates = static_cast<int>(candidates.size());
int best_index = -1;
float best_score = FLT_MAX;
- for (int i = 0; i < nr_candidates; i++)
- {
- const float &fitness_score = candidates [i][0].fitness_score;
- if (fitness_score < best_score)
- {
+ for (int i = 0; i < nr_candidates; i++) {
+ const float& fitness_score = candidates[i][0].fitness_score;
+ if (fitness_score < best_score) {
best_score = fitness_score;
best_index = i;
}
}
// check if a valid candidate was available
- if (!(best_index < 0))
- {
- fitness_score_ = candidates [best_index][0].fitness_score;
- final_transformation_ = candidates [best_index][0].transformation;
- *correspondences_ = candidates [best_index][0].correspondences;
+ if (!(best_index < 0)) {
+ fitness_score_ = candidates[best_index][0].fitness_score;
+ final_transformation_ = candidates[best_index][0].transformation;
+ *correspondences_ = candidates[best_index][0].correspondences;
// here we define convergence if resulting fitness_score is below 1-threshold
converged_ = fitness_score_ < score_threshold_;
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#ifndef PCL_REGISTRATION_IMPL_IA_KFPCS_H_
#define PCL_REGISTRATION_IMPL_IA_KFPCS_H_
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
-KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::KFPCSInitialAlignment () :
- lower_trl_boundary_ (-1.f),
- upper_trl_boundary_ (-1.f),
- lambda_ (0.5f),
- use_trl_score_ (false),
- indices_validation_ (new std::vector <int>)
+KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ KFPCSInitialAlignment()
+: lower_trl_boundary_(-1.f)
+, upper_trl_boundary_(-1.f)
+, lambda_(0.5f)
+, use_trl_score_(false)
+, indices_validation_(new pcl::Indices)
{
reg_name_ = "pcl::registration::KFPCSInitialAlignment";
}
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> bool
-KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::initCompute ()
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+bool
+KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::initCompute()
{
// due to sparse keypoint cloud, do not normalize delta with estimated point density
- if (normalize_delta_)
- {
- PCL_WARN ("[%s::initCompute] Delta should be set according to keypoint precision! Normalization according to point cloud density is ignored.", reg_name_.c_str ());
+ if (normalize_delta_) {
+ PCL_WARN("[%s::initCompute] Delta should be set according to keypoint precision! "
+ "Normalization according to point cloud density is ignored.\n",
+ reg_name_.c_str());
normalize_delta_ = false;
}
// initialize as in fpcs
- pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::initCompute ();
+ pcl::registration::FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ initCompute();
  // set the threshold values with respect to keypoint characteristics
- max_pair_diff_ = delta_ * 1.414f; // diff between 2 points of delta_ accuracy
+ max_pair_diff_ = delta_ * 1.414f; // diff between 2 points of delta_ accuracy
coincidation_limit_ = delta_ * 2.828f; // diff between diff of 2 points
- max_edge_diff_ = delta_ * 3.f; // diff between 2 points + some inaccuracy due to quadruple orientation
- max_mse_ = powf (delta_ * 4.f, 2.f); // diff between 2 points + some registration inaccuracy
- max_inlier_dist_sqr_ = powf (delta_ * 8.f, 2.f); // set rel. high, because MSAC is used (residual based score function)
+ max_edge_diff_ =
+ delta_ *
+ 3.f; // diff between 2 points + some inaccuracy due to quadruple orientation
+ max_mse_ =
+ powf(delta_ * 4.f, 2.f); // diff between 2 points + some registration inaccuracy
+ max_inlier_dist_sqr_ =
+ powf(delta_ * 8.f,
+ 2.f); // set rel. high, because MSAC is used (residual based score function)
// check use of translation costs and calculate upper boundary if not set by user
  if (upper_trl_boundary_ < 0)
    upper_trl_boundary_ = diameter_ * (1.f - approx_overlap_) * 0.5f;

  if (!(lower_trl_boundary_ < 0) && upper_trl_boundary_ > lower_trl_boundary_)
    use_trl_score_ = true;
else
lambda_ = 0.f;
- // generate a subset of indices of size ransac_iterations_ on which to evaluate candidates on
- std::size_t nr_indices = indices_->size ();
- if (nr_indices < std::size_t (ransac_iterations_))
+  // generate a subset of indices of size ransac_iterations_ on which to evaluate
+  // candidates
+ std::size_t nr_indices = indices_->size();
+ if (nr_indices < std::size_t(ransac_iterations_))
indices_validation_ = indices_;
else
for (int i = 0; i < ransac_iterations_; i++)
- indices_validation_->push_back ((*indices_)[rand () % nr_indices]);
+ indices_validation_->push_back((*indices_)[rand() % nr_indices]);
return (true);
}
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::handleMatches (
- const std::vector <int> &base_indices,
- std::vector <std::vector <int> > &matches,
- MatchingCandidates &candidates)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::handleMatches(
+ const pcl::Indices& base_indices,
+ std::vector<pcl::Indices>& matches,
+ MatchingCandidates& candidates)
{
- candidates.clear ();
+ candidates.clear();
// loop over all Candidate matches
- for (auto &match : matches)
- {
+ for (auto& match : matches) {
Eigen::Matrix4f transformation_temp;
pcl::Correspondences correspondences_temp;
- float fitness_score = FLT_MAX; // reset to FLT_MAX to accept all candidates and not only best
+ float fitness_score =
+ FLT_MAX; // reset to FLT_MAX to accept all candidates and not only best
- // determine corresondences between base and match according to their distance to centroid
- linkMatchWithBase (base_indices, match, correspondences_temp);
+    // determine correspondences between base and match according to their distance to
+    // centroid
+ linkMatchWithBase(base_indices, match, correspondences_temp);
// check match based on residuals of the corresponding points after transformation
- if (validateMatch (base_indices, match, correspondences_temp, transformation_temp) < 0)
+ if (validateMatch(base_indices, match, correspondences_temp, transformation_temp) <
+ 0)
continue;
// check resulting transformation using a sub-sample of the source point cloud
// all candidates are stored and later sorted according to their fitness score
- validateTransformation (transformation_temp, fitness_score);
+ validateTransformation(transformation_temp, fitness_score);
// store each valid match as well as its associated score and transformation
- candidates.push_back (MatchingCandidate (fitness_score, correspondences_temp, transformation_temp));
+ candidates.push_back(
+ MatchingCandidate(fitness_score, correspondences_temp, transformation_temp));
}
}
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
-KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::validateTransformation (
- Eigen::Matrix4f &transformation,
- float &fitness_score)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+int
+KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
+ validateTransformation(Eigen::Matrix4f& transformation, float& fitness_score)
{
// transform the sub-sampled source cloud
PointCloudSource source_transformed;
- pcl::transformPointCloud (*input_, *indices_validation_, source_transformed, transformation);
+ pcl::transformPointCloud(
+ *input_, *indices_validation_, source_transformed, transformation);
- const std::size_t nr_points = source_transformed.size ();
+ const std::size_t nr_points = source_transformed.size();
float score_a = 0.f, score_b = 0.f;
// residual costs based on mse
- std::vector <int> ids;
- std::vector <float> dists_sqr;
- for (PointCloudSourceIterator it = source_transformed.begin (), it_e = source_transformed.end (); it != it_e; ++it)
- {
+ pcl::Indices ids;
+ std::vector<float> dists_sqr;
+ for (PointCloudSourceIterator it = source_transformed.begin(),
+ it_e = source_transformed.end();
+ it != it_e;
+ ++it) {
// search for nearest point using kd tree search
- tree_->nearestKSearch (*it, 1, ids, dists_sqr);
- score_a += (dists_sqr[0] < max_inlier_dist_sqr_ ? dists_sqr[0] : max_inlier_dist_sqr_); // MSAC
+ tree_->nearestKSearch(*it, 1, ids, dists_sqr);
+ score_a += (dists_sqr[0] < max_inlier_dist_sqr_ ? dists_sqr[0]
+ : max_inlier_dist_sqr_); // MSAC
}
score_a /= (max_inlier_dist_sqr_ * nr_points); // MSAC
- //score_a = 1.f - (1.f - score_a) / (1.f - approx_overlap_); // make score relative to estimated overlap
+ // score_a = 1.f - (1.f - score_a) / (1.f - approx_overlap_); // make score relative
+ // to estimated overlap
// translation score (solutions with small translation are down-voted)
float scale = 1.f;
- if (use_trl_score_)
- {
- float trl = transformation.rightCols <1> ().head (3).norm ();
- float trl_ratio = (trl - lower_trl_boundary_) / (upper_trl_boundary_ - lower_trl_boundary_);
-
- score_b = (trl_ratio < 0.f ? 1.f : (trl_ratio > 1.f ? 0.f : 0.5f * sin (M_PI * trl_ratio + M_PI_2) + 0.5f)); // sinusoidal costs
+ if (use_trl_score_) {
+ float trl = transformation.rightCols<1>().head(3).norm();
+ float trl_ratio =
+ (trl - lower_trl_boundary_) / (upper_trl_boundary_ - lower_trl_boundary_);
+
+ score_b =
+ (trl_ratio < 0.f ? 1.f
+ : (trl_ratio > 1.f ? 0.f
+ : 0.5f * sin(M_PI * trl_ratio + M_PI_2) +
+ 0.5f)); // sinusoidal costs
scale += lambda_;
}
return (0);
}
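The translation penalty above is a half-cosine ramp over the normalized translation magnitude; the following self-contained sketch isolates that mapping (the function name is chosen here for illustration only).

#include <cmath>

// Maps a translation magnitude to a cost in [0, 1]: 1 below the lower boundary,
// 0 above the upper boundary, and 0.5 * sin(pi * r + pi/2) + 0.5 (equivalently
// 0.5 * cos(pi * r) + 0.5) in between, matching the expression used above.
inline float
translation_cost(float trl, float lower, float upper)
{
  const float r = (trl - lower) / (upper - lower);
  if (r < 0.f)
    return 1.f;
  if (r > 1.f)
    return 0.f;
  return 0.5f * std::sin(static_cast<float>(M_PI) * r + static_cast<float>(M_PI_2)) +
         0.5f;
}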
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::finalCompute (
- const std::vector <MatchingCandidates > &candidates)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::finalCompute(
+ const std::vector<MatchingCandidates>& candidates)
{
// reorganize candidates into single vector
std::size_t total_size = 0;
- for (const auto &candidate : candidates)
- total_size += candidate.size ();
+ for (const auto& candidate : candidates)
+ total_size += candidate.size();
- candidates_.clear ();
- candidates_.reserve (total_size);
+ candidates_.clear();
+ candidates_.reserve(total_size);
- for (const auto &candidate : candidates)
- for (const auto &match : candidate)
- candidates_.push_back (match);
+ for (const auto& candidate : candidates)
+ for (const auto& match : candidate)
+ candidates_.push_back(match);
// sort according to score value
- std::sort (candidates_.begin (), candidates_.end (), by_score ());
+ std::sort(candidates_.begin(), candidates_.end(), by_score());
// return here if no score was valid, i.e. all scores are FLT_MAX
- if (candidates_[0].fitness_score == FLT_MAX)
- {
+ if (candidates_[0].fitness_score == FLT_MAX) {
converged_ = false;
return;
}
// save best candidate as output result
- // note, all other candidates are accessible via getNBestCandidates () and getTBestCandidates ()
- fitness_score_ = candidates_ [0].fitness_score;
- final_transformation_ = candidates_ [0].transformation;
- *correspondences_ = candidates_ [0].correspondences;
+ // note, all other candidates are accessible via getNBestCandidates () and
+ // getTBestCandidates ()
+ fitness_score_ = candidates_[0].fitness_score;
+ final_transformation_ = candidates_[0].transformation;
+ *correspondences_ = candidates_[0].correspondences;
// here we define convergence as the resulting score being below the threshold
converged_ = fitness_score_ < score_threshold_;
}
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::getNBestCandidates (
- int n,
- float min_angle3d,
- float min_translation3d,
- MatchingCandidates &candidates)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::getNBestCandidates(
+ int n, float min_angle3d, float min_translation3d, MatchingCandidates& candidates)
{
- candidates.clear ();
+ candidates.clear();
// loop over all candidates starting from the best one
- for (MatchingCandidates::iterator it_candidate = candidates_.begin (), it_e = candidates_.end (); it_candidate != it_e; ++it_candidate)
- {
+ for (MatchingCandidates::iterator it_candidate = candidates_.begin(),
+ it_e = candidates_.end();
+ it_candidate != it_e;
+ ++it_candidate) {
// stop if current candidate has no valid score
if (it_candidate->fitness_score == FLT_MAX)
return;
- // check if current candidate is a unique one compared to previous using the min_diff threshold
+ // check if the current candidate is unique compared to the previously accepted
+ // ones, using the min_angle3d / min_translation3d thresholds
bool unique = true;
- MatchingCandidates::iterator it = candidates.begin (), it_e2 = candidates.end ();
- while (unique && it != it_e2)
- {
- Eigen::Matrix4f diff = it_candidate->transformation.colPivHouseholderQr ().solve (it->transformation);
- const float angle3d = Eigen::AngleAxisf (diff.block <3, 3> (0, 0)).angle ();
- const float translation3d = diff.block <3, 1> (0, 3).norm ();
+ MatchingCandidates::iterator it = candidates.begin(), it_e2 = candidates.end();
+ while (unique && it != it_e2) {
+ Eigen::Matrix4f diff =
+ it_candidate->transformation.colPivHouseholderQr().solve(it->transformation);
+ const float angle3d = Eigen::AngleAxisf(diff.block<3, 3>(0, 0)).angle();
+ const float translation3d = diff.block<3, 1>(0, 3).norm();
unique = angle3d > min_angle3d && translation3d > min_translation3d;
++it;
}
// add candidate to best candidates
if (unique)
- candidates.push_back (*it_candidate);
+ candidates.push_back(*it_candidate);
// stop if n candidates are reached
- if (candidates.size () == n)
+ if (candidates.size() == n)
return;
}
}
-
-template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
-KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::getTBestCandidates (
- float t,
- float min_angle3d,
- float min_translation3d,
- MatchingCandidates &candidates)
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+void
+KFPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::getTBestCandidates(
+ float t, float min_angle3d, float min_translation3d, MatchingCandidates& candidates)
{
- candidates.clear ();
+ candidates.clear();
// loop over all candidates starting from the best one
- for (MatchingCandidates::iterator it_candidate = candidates_.begin (), it_e = candidates_.end (); it_candidate != it_e; ++it_candidate)
- {
+ for (MatchingCandidates::iterator it_candidate = candidates_.begin(),
+ it_e = candidates_.end();
+ it_candidate != it_e;
+ ++it_candidate) {
// stop if the current candidate's score exceeds the threshold t
if (it_candidate->fitness_score > t)
return;
- // check if current candidate is a unique one compared to previous using the min_diff threshold
+ // check if the current candidate is unique compared to the previously accepted
+ // ones, using the min_angle3d / min_translation3d thresholds
bool unique = true;
- MatchingCandidates::iterator it = candidates.begin (), it_e2 = candidates.end ();
- while (unique && it != it_e2)
- {
- Eigen::Matrix4f diff = it_candidate->transformation.colPivHouseholderQr ().solve (it->transformation);
- const float angle3d = Eigen::AngleAxisf (diff.block <3, 3> (0, 0)).angle ();
- const float translation3d = diff.block <3, 1> (0, 3).norm ();
+ MatchingCandidates::iterator it = candidates.begin(), it_e2 = candidates.end();
+ while (unique && it != it_e2) {
+ Eigen::Matrix4f diff =
+ it_candidate->transformation.colPivHouseholderQr().solve(it->transformation);
+ const float angle3d = Eigen::AngleAxisf(diff.block<3, 3>(0, 0)).angle();
+ const float translation3d = diff.block<3, 1>(0, 3).norm();
unique = angle3d > min_angle3d && translation3d > min_translation3d;
++it;
}
// add candidate to best candidates
if (unique)
- candidates.push_back (*it_candidate);
+ candidates.push_back(*it_candidate);
}
}
} // namespace pcl
#endif // PCL_REGISTRATION_IMPL_IA_KFPCS_H_
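A hedged usage sketch of the class whose implementation ends here; point types, parameter values and thresholds below are illustrative assumptions, not taken from the diff.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/ia_kfpcs.h>

// Align two keypoint clouds with KFPCSInitialAlignment and retrieve the best
// candidate transformations (assumed example parameters).
void
align_kfpcs(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
            const pcl::PointCloud<pcl::PointXYZ>::Ptr& target)
{
  pcl::registration::KFPCSInitialAlignment<pcl::PointXYZ, pcl::PointXYZ> kfpcs;
  kfpcs.setInputSource(source);
  kfpcs.setInputTarget(target);
  kfpcs.setApproxOverlap(0.6f); // assumed overlap between the two scans
  kfpcs.setDelta(0.02f, false); // assumed point accuracy, not normalized

  pcl::PointCloud<pcl::PointXYZ> aligned;
  kfpcs.align(aligned);

  if (kfpcs.hasConverged()) {
    // as noted in finalCompute(), further candidates remain accessible
    pcl::registration::MatchingCandidates best;
    kfpcs.getNBestCandidates(5, 0.087f /* ~5 deg */, 0.1f, best);
  }
}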
-
#include <pcl/common/distances.h>
+namespace pcl {
-namespace pcl
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::setSourceFeatures(
+ const FeatureCloudConstPtr& features)
{
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::setSourceFeatures (const FeatureCloudConstPtr &features)
-{
- if (features == nullptr || features->empty ())
- {
- PCL_ERROR ("[pcl::%s::setSourceFeatures] Invalid or empty point cloud dataset given!\n", getClassName ().c_str ());
+ if (features == nullptr || features->empty()) {
+ PCL_ERROR(
+ "[pcl::%s::setSourceFeatures] Invalid or empty point cloud dataset given!\n",
+ getClassName().c_str());
return;
}
input_features_ = features;
}
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::setTargetFeatures (const FeatureCloudConstPtr &features)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::setTargetFeatures(
+ const FeatureCloudConstPtr& features)
{
- if (features == nullptr || features->empty ())
- {
- PCL_ERROR ("[pcl::%s::setTargetFeatures] Invalid or empty point cloud dataset given!\n", getClassName ().c_str ());
+ if (features == nullptr || features->empty()) {
+ PCL_ERROR(
+ "[pcl::%s::setTargetFeatures] Invalid or empty point cloud dataset given!\n",
+ getClassName().c_str());
return;
}
target_features_ = features;
- feature_tree_->setInputCloud (target_features_);
+ feature_tree_->setInputCloud(target_features_);
}
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::selectSamples (
- const PointCloudSource &cloud, int nr_samples, float min_sample_distance,
- std::vector<int> &sample_indices)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::selectSamples(
+ const PointCloudSource& cloud,
+ unsigned int nr_samples,
+ float min_sample_distance,
+ pcl::Indices& sample_indices)
{
- if (nr_samples > static_cast<int> (cloud.size ()))
- {
- PCL_ERROR ("[pcl::%s::selectSamples] ", getClassName ().c_str ());
- PCL_ERROR("The number of samples (%d) must not be greater than the number of "
+ if (nr_samples > cloud.size()) {
+ PCL_ERROR("[pcl::%s::selectSamples] ", getClassName().c_str());
+ PCL_ERROR("The number of samples (%u) must not be greater than the number of "
"points (%zu)!\n",
nr_samples,
static_cast<std::size_t>(cloud.size()));
// Iteratively draw random samples until nr_samples is reached
index_t iterations_without_a_sample = 0;
- const auto max_iterations_without_a_sample = 3 * cloud.size ();
- sample_indices.clear ();
- while (static_cast<int> (sample_indices.size ()) < nr_samples)
- {
+ const auto max_iterations_without_a_sample = 3 * cloud.size();
+ sample_indices.clear();
+ while (sample_indices.size() < nr_samples) {
// Choose a sample at random
- int sample_index = getRandomIndex (static_cast<int> (cloud.size ()));
+ const auto sample_index = getRandomIndex(cloud.size());
// Check to see if the sample is 1) unique and 2) far away from the other samples
bool valid_sample = true;
- for (const int &sample_idx : sample_indices)
- {
- float distance_between_samples = euclideanDistance (cloud[sample_index], cloud[sample_idx]);
+ for (const auto& sample_idx : sample_indices) {
+ float distance_between_samples =
+ euclideanDistance(cloud[sample_index], cloud[sample_idx]);
- if (sample_index == sample_idx || distance_between_samples < min_sample_distance)
- {
+ if (sample_index == sample_idx ||
+ distance_between_samples < min_sample_distance) {
valid_sample = false;
break;
}
}
// If the sample is valid, add it to the output
- if (valid_sample)
- {
- sample_indices.push_back (sample_index);
+ if (valid_sample) {
+ sample_indices.push_back(sample_index);
iterations_without_a_sample = 0;
}
else
++iterations_without_a_sample;
// If no valid samples can be found, relax the inter-sample distance requirements
- if (static_cast<std::size_t>(iterations_without_a_sample) >= max_iterations_without_a_sample)
- {
- PCL_WARN ("[pcl::%s::selectSamples] ", getClassName ().c_str ());
- PCL_WARN ("No valid sample found after %zu iterations. Relaxing min_sample_distance_ to %f\n",
- static_cast<std::size_t>(iterations_without_a_sample), 0.5*min_sample_distance);
+ if (static_cast<std::size_t>(iterations_without_a_sample) >=
+ max_iterations_without_a_sample) {
+ PCL_WARN("[pcl::%s::selectSamples] ", getClassName().c_str());
+ PCL_WARN("No valid sample found after %zu iterations. Relaxing "
+ "min_sample_distance_ to %f\n",
+ static_cast<std::size_t>(iterations_without_a_sample),
+ 0.5 * min_sample_distance);
min_sample_distance_ *= 0.5f;
min_sample_distance = min_sample_distance_;
}
}
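The sampling loop above is essentially rejection sampling; the simplified, self-contained sketch below shows the same idea without the distance-relaxation fallback, so it assumes nr_samples mutually distant points actually exist.

#include <pcl/common/distances.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/types.h>
#include <cstdlib>

inline void
draw_distant_samples(const pcl::PointCloud<pcl::PointXYZ>& cloud,
                     unsigned int nr_samples,
                     float min_sample_distance,
                     pcl::Indices& sample_indices)
{
  sample_indices.clear();
  while (sample_indices.size() < nr_samples) {
    // draw a random index and keep it only if it is new and far enough from
    // every sample drawn so far
    const pcl::index_t candidate = std::rand() % cloud.size();
    bool valid = true;
    for (const auto& idx : sample_indices) {
      if (candidate == idx ||
          pcl::euclideanDistance(cloud[candidate], cloud[idx]) < min_sample_distance) {
        valid = false;
        break;
      }
    }
    if (valid)
      sample_indices.push_back(candidate);
  }
}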
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::findSimilarFeatures (
- const FeatureCloud &input_features, const std::vector<int> &sample_indices,
- std::vector<int> &corresponding_indices)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::
+ findSimilarFeatures(const FeatureCloud& input_features,
+ const pcl::Indices& sample_indices,
+ pcl::Indices& corresponding_indices)
{
- std::vector<int> nn_indices (k_correspondences_);
- std::vector<float> nn_distances (k_correspondences_);
+ pcl::Indices nn_indices(k_correspondences_);
+ std::vector<float> nn_distances(k_correspondences_);
- corresponding_indices.resize (sample_indices.size ());
- for (std::size_t i = 0; i < sample_indices.size (); ++i)
- {
+ corresponding_indices.resize(sample_indices.size());
+ for (std::size_t i = 0; i < sample_indices.size(); ++i) {
// Find the k features nearest to input_features[sample_indices[i]]
- feature_tree_->nearestKSearch (input_features, sample_indices[i], k_correspondences_, nn_indices, nn_distances);
+ feature_tree_->nearestKSearch(input_features,
+ sample_indices[i],
+ k_correspondences_,
+ nn_indices,
+ nn_distances);
// Select one at random and add it to corresponding_indices
- int random_correspondence = getRandomIndex (k_correspondences_);
+ const auto random_correspondence = getRandomIndex(k_correspondences_);
corresponding_indices[i] = nn_indices[random_correspondence];
}
}
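The randomized neighbour pick above is what distinguishes SAC-IA from a plain nearest-feature match; a trivial sketch of that choice (the helper name is hypothetical):

#include <pcl/types.h>
#include <cstdlib>

// Given the k nearest feature indices, pick one uniformly at random instead of
// always taking the single closest match.
inline pcl::index_t
pick_random_neighbour(const pcl::Indices& nn_indices)
{
  return nn_indices[std::rand() % nn_indices.size()];
}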
-
-template <typename PointSource, typename PointTarget, typename FeatureT> float
-SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::computeErrorMetric (
- const PointCloudSource &cloud, float)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+float
+SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::computeErrorMetric(
+ const PointCloudSource& cloud, float)
{
- std::vector<int> nn_index (1);
- std::vector<float> nn_distance (1);
+ pcl::Indices nn_index(1);
+ std::vector<float> nn_distance(1);
- const ErrorFunctor & compute_error = *error_functor_;
+ const ErrorFunctor& compute_error = *error_functor_;
float error = 0;
- for (int i = 0; i < static_cast<int> (cloud.size ()); ++i)
- {
- // Find the distance between cloud[i] and its nearest neighbor in the target point cloud
- tree_->nearestKSearch (cloud, i, 1, nn_index, nn_distance);
+ for (const auto& point : cloud) {
+ // Find the distance between point and its nearest neighbor in the target point
+ // cloud
+ tree_->nearestKSearch(point, 1, nn_index, nn_distance);
// Compute the error
- error += compute_error (nn_distance[0]);
+ error += compute_error(nn_distance[0]);
}
return (error);
}
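computeErrorMetric() feeds each nearest-neighbour distance through an ErrorFunctor; below is a hedged illustration of a truncated (MSAC-style) cost in that spirit — not necessarily the exact formula of the library's TruncatedError.

// Per-point cost that grows with the (squared) nearest-neighbour distance but is
// capped at a threshold, so far-away outliers cannot dominate the sum.
struct TruncatedCost {
  float threshold;
  explicit TruncatedCost(float t) : threshold(t) {}
  float operator()(float d) const { return d < threshold ? d : threshold; }
};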
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::
+ computeTransformation(PointCloudSource& output, const Eigen::Matrix4f& guess)
{
// Some sanity checks first
- if (!input_features_)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("No source features were given! Call setSourceFeatures before aligning.\n");
+ if (!input_features_) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR(
+ "No source features were given! Call setSourceFeatures before aligning.\n");
return;
}
- if (!target_features_)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("No target features were given! Call setTargetFeatures before aligning.\n");
+ if (!target_features_) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR(
+ "No target features were given! Call setTargetFeatures before aligning.\n");
return;
}
- if (input_->size () != input_features_->size ())
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("The source points and source feature points need to be in a one-to-one relationship! Current input cloud sizes: %ld vs %ld.\n",
- input_->size (), input_features_->size ());
+ if (input_->size() != input_features_->size()) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR("The source points and source feature points need to be in a one-to-one "
+ "relationship! Current input cloud sizes: %ld vs %ld.\n",
+ input_->size(),
+ input_features_->size());
return;
}
- if (target_->size () != target_features_->size ())
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("The target points and target feature points need to be in a one-to-one relationship! Current input cloud sizes: %ld vs %ld.\n",
- target_->size (), target_features_->size ());
+ if (target_->size() != target_features_->size()) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR("The target points and target feature points need to be in a one-to-one "
+ "relationship! Current input cloud sizes: %ld vs %ld.\n",
+ target_->size(),
+ target_features_->size());
return;
}
if (!error_functor_)
- error_functor_.reset (new TruncatedError (static_cast<float> (corr_dist_threshold_)));
+ error_functor_.reset(new TruncatedError(static_cast<float>(corr_dist_threshold_)));
-
- std::vector<int> sample_indices (nr_samples_);
- std::vector<int> corresponding_indices (nr_samples_);
+ pcl::Indices sample_indices(nr_samples_);
+ pcl::Indices corresponding_indices(nr_samples_);
PointCloudSource input_transformed;
- float lowest_error (0);
+ float lowest_error(0);
final_transformation_ = guess;
int i_iter = 0;
converged_ = false;
- if (!guess.isApprox (Eigen::Matrix4f::Identity (), 0.01f))
- {
+ if (!guess.isApprox(Eigen::Matrix4f::Identity(), 0.01f)) {
// If the guess is not the identity matrix, evaluate its error so it can serve as the initial best.
- transformPointCloud (*input_, input_transformed, final_transformation_);
- lowest_error = computeErrorMetric (input_transformed, static_cast<float> (corr_dist_threshold_));
+ transformPointCloud(*input_, input_transformed, final_transformation_);
+ lowest_error =
+ computeErrorMetric(input_transformed, static_cast<float>(corr_dist_threshold_));
i_iter = 1;
}
- for (; i_iter < max_iterations_; ++i_iter)
- {
+ for (; i_iter < max_iterations_; ++i_iter) {
// Draw nr_samples_ random samples
- selectSamples (*input_, nr_samples_, min_sample_distance_, sample_indices);
+ selectSamples(*input_, nr_samples_, min_sample_distance_, sample_indices);
// Find corresponding features in the target cloud
- findSimilarFeatures (*input_features_, sample_indices, corresponding_indices);
+ findSimilarFeatures(*input_features_, sample_indices, corresponding_indices);
// Estimate the transform from the samples to their corresponding points
- transformation_estimation_->estimateRigidTransformation (*input_, sample_indices, *target_, corresponding_indices, transformation_);
+ transformation_estimation_->estimateRigidTransformation(
+ *input_, sample_indices, *target_, corresponding_indices, transformation_);
// Transform the data and compute the error
- transformPointCloud (*input_, input_transformed, transformation_);
- float error = computeErrorMetric (input_transformed, static_cast<float> (corr_dist_threshold_));
+ transformPointCloud(*input_, input_transformed, transformation_);
+ float error =
+ computeErrorMetric(input_transformed, static_cast<float>(corr_dist_threshold_));
// If the new error is lower, update the final transformation
- if (i_iter == 0 || error < lowest_error)
- {
+ if (i_iter == 0 || error < lowest_error) {
lowest_error = error;
final_transformation_ = transformation_;
- converged_=true;
+ converged_ = true;
}
}
// Apply the final transformation
- transformPointCloud (*input_, output, final_transformation_);
+ transformPointCloud(*input_, output, final_transformation_);
}
} // namespace pcl
-#endif //#ifndef IA_RANSAC_HPP_
-
+#endif //#ifndef IA_RANSAC_HPP_
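A hedged usage sketch of SampleConsensusInitialAlignment as implemented above; the FPFH feature clouds are assumed to have been computed elsewhere (one descriptor per point), and all parameter values are placeholders.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/ia_ransac.h>

void
align_sac_ia(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
             const pcl::PointCloud<pcl::PointXYZ>::Ptr& target,
             const pcl::PointCloud<pcl::FPFHSignature33>::Ptr& source_features,
             const pcl::PointCloud<pcl::FPFHSignature33>::Ptr& target_features)
{
  pcl::SampleConsensusInitialAlignment<pcl::PointXYZ, pcl::PointXYZ, pcl::FPFHSignature33>
      sac_ia;
  sac_ia.setInputSource(source);
  sac_ia.setInputTarget(target);
  sac_ia.setSourceFeatures(source_features);
  sac_ia.setTargetFeatures(target_features);
  sac_ia.setMinSampleDistance(0.05f); // assumed minimum spacing between samples
  sac_ia.setMaximumIterations(500);   // assumed RANSAC iteration budget

  pcl::PointCloud<pcl::PointXYZ> aligned;
  sac_ia.align(aligned);

  // the coarse result is typically handed on to ICP as an initial guess
  const Eigen::Matrix4f initial_guess = sac_ia.getFinalTransformation();
  (void)initial_guess;
}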
#ifndef PCL_REGISTRATION_IMPL_ICP_HPP_
#define PCL_REGISTRATION_IMPL_ICP_HPP_
-#include <pcl/registration/boost.h>
#include <pcl/correspondence.h>
+namespace pcl {
-namespace pcl
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformCloud(
+ const PointCloudSource& input, PointCloudSource& output, const Matrix4& transform)
{
+ Eigen::Vector4f pt(0.0f, 0.0f, 0.0f, 1.0f), pt_t;
+ Eigen::Matrix4f tr = transform.template cast<float>();
-template <typename PointSource, typename PointTarget, typename Scalar> void
-IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformCloud (
- const PointCloudSource &input,
- PointCloudSource &output,
- const Matrix4 &transform)
-{
- Eigen::Vector4f pt (0.0f, 0.0f, 0.0f, 1.0f), pt_t;
- Eigen::Matrix4f tr = transform.template cast<float> ();
-
- // XYZ is ALWAYS present due to the templatization, so we only have to check for normals
- if (source_has_normals_)
- {
+ // XYZ is ALWAYS present due to the templatization, so we only have to check for
+ // normals
+ if (source_has_normals_) {
Eigen::Vector3f nt, nt_t;
- Eigen::Matrix3f rot = tr.block<3, 3> (0, 0);
+ Eigen::Matrix3f rot = tr.block<3, 3>(0, 0);
- for (std::size_t i = 0; i < input.size (); ++i)
- {
- const std::uint8_t* data_in = reinterpret_cast<const std::uint8_t*> (&input[i]);
- std::uint8_t* data_out = reinterpret_cast<std::uint8_t*> (&output[i]);
- memcpy (&pt[0], data_in + x_idx_offset_, sizeof (float));
- memcpy (&pt[1], data_in + y_idx_offset_, sizeof (float));
- memcpy (&pt[2], data_in + z_idx_offset_, sizeof (float));
+ for (std::size_t i = 0; i < input.size(); ++i) {
+ const std::uint8_t* data_in = reinterpret_cast<const std::uint8_t*>(&input[i]);
+ std::uint8_t* data_out = reinterpret_cast<std::uint8_t*>(&output[i]);
+ memcpy(&pt[0], data_in + x_idx_offset_, sizeof(float));
+ memcpy(&pt[1], data_in + y_idx_offset_, sizeof(float));
+ memcpy(&pt[2], data_in + z_idx_offset_, sizeof(float));
- if (!std::isfinite (pt[0]) || !std::isfinite (pt[1]) || !std::isfinite (pt[2]))
+ if (!std::isfinite(pt[0]) || !std::isfinite(pt[1]) || !std::isfinite(pt[2]))
continue;
pt_t = tr * pt;
- memcpy (data_out + x_idx_offset_, &pt_t[0], sizeof (float));
- memcpy (data_out + y_idx_offset_, &pt_t[1], sizeof (float));
- memcpy (data_out + z_idx_offset_, &pt_t[2], sizeof (float));
+ memcpy(data_out + x_idx_offset_, &pt_t[0], sizeof(float));
+ memcpy(data_out + y_idx_offset_, &pt_t[1], sizeof(float));
+ memcpy(data_out + z_idx_offset_, &pt_t[2], sizeof(float));
- memcpy (&nt[0], data_in + nx_idx_offset_, sizeof (float));
- memcpy (&nt[1], data_in + ny_idx_offset_, sizeof (float));
- memcpy (&nt[2], data_in + nz_idx_offset_, sizeof (float));
+ memcpy(&nt[0], data_in + nx_idx_offset_, sizeof(float));
+ memcpy(&nt[1], data_in + ny_idx_offset_, sizeof(float));
+ memcpy(&nt[2], data_in + nz_idx_offset_, sizeof(float));
- if (!std::isfinite (nt[0]) || !std::isfinite (nt[1]) || !std::isfinite (nt[2]))
+ if (!std::isfinite(nt[0]) || !std::isfinite(nt[1]) || !std::isfinite(nt[2]))
continue;
nt_t = rot * nt;
- memcpy (data_out + nx_idx_offset_, &nt_t[0], sizeof (float));
- memcpy (data_out + ny_idx_offset_, &nt_t[1], sizeof (float));
- memcpy (data_out + nz_idx_offset_, &nt_t[2], sizeof (float));
+ memcpy(data_out + nx_idx_offset_, &nt_t[0], sizeof(float));
+ memcpy(data_out + ny_idx_offset_, &nt_t[1], sizeof(float));
+ memcpy(data_out + nz_idx_offset_, &nt_t[2], sizeof(float));
}
}
- else
- {
- for (std::size_t i = 0; i < input.size (); ++i)
- {
- const std::uint8_t* data_in = reinterpret_cast<const std::uint8_t*> (&input[i]);
- std::uint8_t* data_out = reinterpret_cast<std::uint8_t*> (&output[i]);
- memcpy (&pt[0], data_in + x_idx_offset_, sizeof (float));
- memcpy (&pt[1], data_in + y_idx_offset_, sizeof (float));
- memcpy (&pt[2], data_in + z_idx_offset_, sizeof (float));
-
- if (!std::isfinite (pt[0]) || !std::isfinite (pt[1]) || !std::isfinite (pt[2]))
+ else {
+ for (std::size_t i = 0; i < input.size(); ++i) {
+ const std::uint8_t* data_in = reinterpret_cast<const std::uint8_t*>(&input[i]);
+ std::uint8_t* data_out = reinterpret_cast<std::uint8_t*>(&output[i]);
+ memcpy(&pt[0], data_in + x_idx_offset_, sizeof(float));
+ memcpy(&pt[1], data_in + y_idx_offset_, sizeof(float));
+ memcpy(&pt[2], data_in + z_idx_offset_, sizeof(float));
+
+ if (!std::isfinite(pt[0]) || !std::isfinite(pt[1]) || !std::isfinite(pt[2]))
continue;
pt_t = tr * pt;
- memcpy (data_out + x_idx_offset_, &pt_t[0], sizeof (float));
- memcpy (data_out + y_idx_offset_, &pt_t[1], sizeof (float));
- memcpy (data_out + z_idx_offset_, &pt_t[2], sizeof (float));
+ memcpy(data_out + x_idx_offset_, &pt_t[0], sizeof(float));
+ memcpy(data_out + y_idx_offset_, &pt_t[1], sizeof(float));
+ memcpy(data_out + z_idx_offset_, &pt_t[2], sizeof(float));
}
}
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-IterativeClosestPoint<PointSource, PointTarget, Scalar>::computeTransformation (
- PointCloudSource &output, const Matrix4 &guess)
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+IterativeClosestPoint<PointSource, PointTarget, Scalar>::computeTransformation(
+ PointCloudSource& output, const Matrix4& guess)
{
// Point cloud containing the correspondences of each point in <input, indices>
- PointCloudSourcePtr input_transformed (new PointCloudSource);
+ PointCloudSourcePtr input_transformed(new PointCloudSource);
nr_iterations_ = 0;
converged_ = false;
final_transformation_ = guess;
// If the guessed transformation is non identity
- if (guess != Matrix4::Identity ())
- {
- input_transformed->resize (input_->size ());
- // Apply guessed transformation prior to search for neighbours
- transformCloud (*input_, *input_transformed, guess);
+ if (guess != Matrix4::Identity()) {
+ input_transformed->resize(input_->size());
+ // Apply the guessed transformation prior to searching for neighbours
+ transformCloud(*input_, *input_transformed, guess);
}
else
*input_transformed = *input_;
- transformation_ = Matrix4::Identity ();
+ transformation_ = Matrix4::Identity();
// Make blobs if necessary
- determineRequiredBlobData ();
- PCLPointCloud2::Ptr target_blob (new PCLPointCloud2);
+ determineRequiredBlobData();
+ PCLPointCloud2::Ptr target_blob(new PCLPointCloud2);
if (need_target_blob_)
- pcl::toPCLPointCloud2 (*target_, *target_blob);
+ pcl::toPCLPointCloud2(*target_, *target_blob);
// Pass in the default target for the Correspondence Estimation/Rejection code
- correspondence_estimation_->setInputTarget (target_);
- if (correspondence_estimation_->requiresTargetNormals ())
- correspondence_estimation_->setTargetNormals (target_blob);
+ correspondence_estimation_->setInputTarget(target_);
+ if (correspondence_estimation_->requiresTargetNormals())
+ correspondence_estimation_->setTargetNormals(target_blob);
// Correspondence Rejectors need a binary blob
- for (std::size_t i = 0; i < correspondence_rejectors_.size (); ++i)
- {
+ for (std::size_t i = 0; i < correspondence_rejectors_.size(); ++i) {
registration::CorrespondenceRejector::Ptr& rej = correspondence_rejectors_[i];
- if (rej->requiresTargetPoints ())
- rej->setTargetPoints (target_blob);
- if (rej->requiresTargetNormals () && target_has_normals_)
- rej->setTargetNormals (target_blob);
+ if (rej->requiresTargetPoints())
+ rej->setTargetPoints(target_blob);
+ if (rej->requiresTargetNormals() && target_has_normals_)
+ rej->setTargetNormals(target_blob);
}
- convergence_criteria_->setMaximumIterations (max_iterations_);
- convergence_criteria_->setRelativeMSE (euclidean_fitness_epsilon_);
- convergence_criteria_->setTranslationThreshold (transformation_epsilon_);
+ convergence_criteria_->setMaximumIterations(max_iterations_);
+ convergence_criteria_->setRelativeMSE(euclidean_fitness_epsilon_);
+ convergence_criteria_->setTranslationThreshold(transformation_epsilon_);
if (transformation_rotation_epsilon_ > 0)
- convergence_criteria_->setRotationThreshold (transformation_rotation_epsilon_);
+ convergence_criteria_->setRotationThreshold(transformation_rotation_epsilon_);
else
- convergence_criteria_->setRotationThreshold (1.0 - transformation_epsilon_);
+ convergence_criteria_->setRotationThreshold(1.0 - transformation_epsilon_);
// Repeat until convergence
- do
- {
+ do {
// Get blob data if needed
PCLPointCloud2::Ptr input_transformed_blob;
- if (need_source_blob_)
- {
- input_transformed_blob.reset (new PCLPointCloud2);
- toPCLPointCloud2 (*input_transformed, *input_transformed_blob);
+ if (need_source_blob_) {
+ input_transformed_blob.reset(new PCLPointCloud2);
+ toPCLPointCloud2(*input_transformed, *input_transformed_blob);
}
// Save the previously estimated transformation
previous_transformation_ = transformation_;
// Set the source each iteration, to ensure the dirty flag is updated
- correspondence_estimation_->setInputSource (input_transformed);
- if (correspondence_estimation_->requiresSourceNormals ())
- correspondence_estimation_->setSourceNormals (input_transformed_blob);
+ correspondence_estimation_->setInputSource(input_transformed);
+ if (correspondence_estimation_->requiresSourceNormals())
+ correspondence_estimation_->setSourceNormals(input_transformed_blob);
// Estimate correspondences
if (use_reciprocal_correspondence_)
- correspondence_estimation_->determineReciprocalCorrespondences (*correspondences_, corr_dist_threshold_);
+ correspondence_estimation_->determineReciprocalCorrespondences(
+ *correspondences_, corr_dist_threshold_);
else
- correspondence_estimation_->determineCorrespondences (*correspondences_, corr_dist_threshold_);
+ correspondence_estimation_->determineCorrespondences(*correspondences_,
+ corr_dist_threshold_);
- //if (correspondence_rejectors_.empty ())
- CorrespondencesPtr temp_correspondences (new Correspondences (*correspondences_));
- for (std::size_t i = 0; i < correspondence_rejectors_.size (); ++i)
- {
+ // if (correspondence_rejectors_.empty ())
+ CorrespondencesPtr temp_correspondences(new Correspondences(*correspondences_));
+ for (std::size_t i = 0; i < correspondence_rejectors_.size(); ++i) {
registration::CorrespondenceRejector::Ptr& rej = correspondence_rejectors_[i];
- PCL_DEBUG ("Applying a correspondence rejector method: %s.\n", rej->getClassName ().c_str ());
- if (rej->requiresSourcePoints ())
- rej->setSourcePoints (input_transformed_blob);
- if (rej->requiresSourceNormals () && source_has_normals_)
- rej->setSourceNormals (input_transformed_blob);
- rej->setInputCorrespondences (temp_correspondences);
- rej->getCorrespondences (*correspondences_);
+ PCL_DEBUG("Applying a correspondence rejector method: %s.\n",
+ rej->getClassName().c_str());
+ if (rej->requiresSourcePoints())
+ rej->setSourcePoints(input_transformed_blob);
+ if (rej->requiresSourceNormals() && source_has_normals_)
+ rej->setSourceNormals(input_transformed_blob);
+ rej->setInputCorrespondences(temp_correspondences);
+ rej->getCorrespondences(*correspondences_);
// Modify input for the next iteration
- if (i < correspondence_rejectors_.size () - 1)
+ if (i < correspondence_rejectors_.size() - 1)
*temp_correspondences = *correspondences_;
}
- std::size_t cnt = correspondences_->size ();
+ std::size_t cnt = correspondences_->size();
// Check whether we have enough correspondences
- if (static_cast<int> (cnt) < min_number_correspondences_)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] Not enough correspondences found. Relax your threshold parameters.\n", getClassName ().c_str ());
- convergence_criteria_->setConvergenceState(pcl::registration::DefaultConvergenceCriteria<Scalar>::CONVERGENCE_CRITERIA_NO_CORRESPONDENCES);
+ if (static_cast<int>(cnt) < min_number_correspondences_) {
+ PCL_ERROR("[pcl::%s::computeTransformation] Not enough correspondences found. "
+ "Relax your threshold parameters.\n",
+ getClassName().c_str());
+ convergence_criteria_->setConvergenceState(
+ pcl::registration::DefaultConvergenceCriteria<
+ Scalar>::CONVERGENCE_CRITERIA_NO_CORRESPONDENCES);
converged_ = false;
break;
}
// Estimate the transform
- transformation_estimation_->estimateRigidTransformation (*input_transformed, *target_, *correspondences_, transformation_);
+ transformation_estimation_->estimateRigidTransformation(
+ *input_transformed, *target_, *correspondences_, transformation_);
// Transform the data
- transformCloud (*input_transformed, *input_transformed, transformation_);
+ transformCloud(*input_transformed, *input_transformed, transformation_);
// Obtain the final transformation
final_transformation_ = transformation_ * final_transformation_;
++nr_iterations_;
// Update the visualization of ICP convergence
- //if (update_visualizer_ != 0)
+ // if (update_visualizer_ != 0)
// update_visualizer_(output, source_indices_good, *target_, target_indices_good );
- converged_ = static_cast<bool> ((*convergence_criteria_));
- }
- while (convergence_criteria_->getConvergenceState() == pcl::registration::DefaultConvergenceCriteria<Scalar>::CONVERGENCE_CRITERIA_NOT_CONVERGED);
+ converged_ = static_cast<bool>((*convergence_criteria_));
+ } while (convergence_criteria_->getConvergenceState() ==
+ pcl::registration::DefaultConvergenceCriteria<
+ Scalar>::CONVERGENCE_CRITERIA_NOT_CONVERGED);
// Transform the input cloud using the final transformation
- PCL_DEBUG ("Transformation is:\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n",
- final_transformation_ (0, 0), final_transformation_ (0, 1), final_transformation_ (0, 2), final_transformation_ (0, 3),
- final_transformation_ (1, 0), final_transformation_ (1, 1), final_transformation_ (1, 2), final_transformation_ (1, 3),
- final_transformation_ (2, 0), final_transformation_ (2, 1), final_transformation_ (2, 2), final_transformation_ (2, 3),
- final_transformation_ (3, 0), final_transformation_ (3, 1), final_transformation_ (3, 2), final_transformation_ (3, 3));
+ PCL_DEBUG("Transformation "
+ "is:\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%"
+ "5f\t%5f\t%5f\t%5f\n",
+ final_transformation_(0, 0),
+ final_transformation_(0, 1),
+ final_transformation_(0, 2),
+ final_transformation_(0, 3),
+ final_transformation_(1, 0),
+ final_transformation_(1, 1),
+ final_transformation_(1, 2),
+ final_transformation_(1, 3),
+ final_transformation_(2, 0),
+ final_transformation_(2, 1),
+ final_transformation_(2, 2),
+ final_transformation_(2, 3),
+ final_transformation_(3, 0),
+ final_transformation_(3, 1),
+ final_transformation_(3, 2),
+ final_transformation_(3, 3));
// Copy all the values
output = *input_;
// Transform the XYZ + normals
- transformCloud (*input_, output, final_transformation_);
+ transformCloud(*input_, output, final_transformation_);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-IterativeClosestPoint<PointSource, PointTarget, Scalar>::determineRequiredBlobData ()
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+IterativeClosestPoint<PointSource, PointTarget, Scalar>::determineRequiredBlobData()
{
need_source_blob_ = false;
need_target_blob_ = false;
// Check estimator
- need_source_blob_ |= correspondence_estimation_->requiresSourceNormals ();
- need_target_blob_ |= correspondence_estimation_->requiresTargetNormals ();
+ need_source_blob_ |= correspondence_estimation_->requiresSourceNormals();
+ need_target_blob_ |= correspondence_estimation_->requiresTargetNormals();
// Add warnings if necessary
- if (correspondence_estimation_->requiresSourceNormals () && !source_has_normals_)
- {
- PCL_WARN("[pcl::%s::determineRequiredBlobData] Estimator expects source normals, but we can't provide them.\n", getClassName ().c_str ());
+ if (correspondence_estimation_->requiresSourceNormals() && !source_has_normals_) {
+ PCL_WARN("[pcl::%s::determineRequiredBlobData] Estimator expects source normals, "
+ "but we can't provide them.\n",
+ getClassName().c_str());
}
- if (correspondence_estimation_->requiresTargetNormals () && !target_has_normals_)
- {
- PCL_WARN("[pcl::%s::determineRequiredBlobData] Estimator expects target normals, but we can't provide them.\n", getClassName ().c_str ());
+ if (correspondence_estimation_->requiresTargetNormals() && !target_has_normals_) {
+ PCL_WARN("[pcl::%s::determineRequiredBlobData] Estimator expects target normals, "
+ "but we can't provide them.\n",
+ getClassName().c_str());
}
// Check rejectors
- for (std::size_t i = 0; i < correspondence_rejectors_.size (); i++)
- {
+ for (std::size_t i = 0; i < correspondence_rejectors_.size(); i++) {
registration::CorrespondenceRejector::Ptr& rej = correspondence_rejectors_[i];
- need_source_blob_ |= rej->requiresSourcePoints ();
- need_source_blob_ |= rej->requiresSourceNormals ();
- need_target_blob_ |= rej->requiresTargetPoints ();
- need_target_blob_ |= rej->requiresTargetNormals ();
- if (rej->requiresSourceNormals () && !source_has_normals_)
- {
- PCL_WARN("[pcl::%s::determineRequiredBlobData] Rejector %s expects source normals, but we can't provide them.\n", getClassName ().c_str (), rej->getClassName ().c_str ());
+ need_source_blob_ |= rej->requiresSourcePoints();
+ need_source_blob_ |= rej->requiresSourceNormals();
+ need_target_blob_ |= rej->requiresTargetPoints();
+ need_target_blob_ |= rej->requiresTargetNormals();
+ if (rej->requiresSourceNormals() && !source_has_normals_) {
+ PCL_WARN("[pcl::%s::determineRequiredBlobData] Rejector %s expects source "
+ "normals, but we can't provide them.\n",
+ getClassName().c_str(),
+ rej->getClassName().c_str());
}
- if (rej->requiresTargetNormals () && !target_has_normals_)
- {
- PCL_WARN("[pcl::%s::determineRequiredBlobData] Rejector %s expects target normals, but we can't provide them.\n", getClassName ().c_str (), rej->getClassName ().c_str ());
+ if (rej->requiresTargetNormals() && !target_has_normals_) {
+ PCL_WARN("[pcl::%s::determineRequiredBlobData] Rejector %s expects target "
+ "normals, but we can't provide them.\n",
+ getClassName().c_str(),
+ rej->getClassName().c_str());
}
}
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-IterativeClosestPointWithNormals<PointSource, PointTarget, Scalar>::transformCloud (
- const PointCloudSource &input,
- PointCloudSource &output,
- const Matrix4 &transform)
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+IterativeClosestPointWithNormals<PointSource, PointTarget, Scalar>::transformCloud(
+ const PointCloudSource& input, PointCloudSource& output, const Matrix4& transform)
{
- pcl::transformPointCloudWithNormals (input, output, transform);
+ pcl::transformPointCloudWithNormals(input, output, transform);
}
} // namespace pcl
#endif /* PCL_REGISTRATION_IMPL_ICP_HPP_ */
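For completeness, a hedged usage sketch of the ICP loop implemented above; the thresholds are illustrative values that map onto the convergence criteria configured in computeTransformation().

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>

void
align_icp(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
          const pcl::PointCloud<pcl::PointXYZ>::Ptr& target)
{
  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
  icp.setInputSource(source);
  icp.setInputTarget(target);
  icp.setMaxCorrespondenceDistance(0.1); // corr_dist_threshold_ (assumed)
  icp.setMaximumIterations(50);          // iteration cap (assumed)
  icp.setTransformationEpsilon(1e-8);    // translation threshold (assumed)
  icp.setEuclideanFitnessEpsilon(1e-6);  // relative MSE threshold (assumed)

  pcl::PointCloud<pcl::PointXYZ> aligned;
  icp.align(aligned);

  if (icp.hasConverged()) {
    const Eigen::Matrix4f T = icp.getFinalTransformation();
    (void)T; // use the refined pose downstream
  }
}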
-
#define PCL_REGISTRATION_ICP_NL_HPP_
#endif /* PCL_REGISTRATION_ICP_NL_HPP_ */
-
#ifndef PCL_REGISTRATION_IMPL_INCREMENTAL_REGISTRATION_HPP_
#define PCL_REGISTRATION_IMPL_INCREMENTAL_REGISTRATION_HPP_
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
template <typename PointT, typename Scalar>
-IncrementalRegistration<PointT, Scalar>::IncrementalRegistration () :
- delta_transform_ (Matrix4::Identity ()),
- abs_transform_ (Matrix4::Identity ())
+IncrementalRegistration<PointT, Scalar>::IncrementalRegistration()
+: delta_transform_(Matrix4::Identity()), abs_transform_(Matrix4::Identity())
{}
-template <typename PointT, typename Scalar> bool
-IncrementalRegistration<PointT, Scalar>::registerCloud (const PointCloudConstPtr& cloud, const Matrix4& delta_estimate)
+template <typename PointT, typename Scalar>
+bool
+IncrementalRegistration<PointT, Scalar>::registerCloud(const PointCloudConstPtr& cloud,
+ const Matrix4& delta_estimate)
{
- assert (registration_);
+ assert(registration_);
- if (!last_cloud_)
- {
+ if (!last_cloud_) {
last_cloud_ = cloud;
abs_transform_ = delta_transform_ = delta_estimate;
return (true);
}
- registration_->setInputSource (cloud);
- registration_->setInputTarget (last_cloud_);
+ registration_->setInputSource(cloud);
+ registration_->setInputTarget(last_cloud_);
{
- pcl::PointCloud<PointT> p;
- registration_->align (p, delta_estimate);
+ pcl::PointCloud<PointT> p;
+ registration_->align(p, delta_estimate);
}
- bool converged = registration_->hasConverged ();
+ bool converged = registration_->hasConverged();
- if ( converged ){
- delta_transform_ = registration_->getFinalTransformation ();
+ if (converged) {
+ delta_transform_ = registration_->getFinalTransformation();
abs_transform_ *= delta_transform_;
last_cloud_ = cloud;
}
return (converged);
}
-template <typename PointT, typename Scalar> inline typename pcl::registration::IncrementalRegistration<PointT, Scalar>::Matrix4
-IncrementalRegistration<PointT, Scalar>::getDeltaTransform () const
+template <typename PointT, typename Scalar>
+inline typename pcl::registration::IncrementalRegistration<PointT, Scalar>::Matrix4
+IncrementalRegistration<PointT, Scalar>::getDeltaTransform() const
{
return (delta_transform_);
}
-template <typename PointT, typename Scalar> inline typename pcl::registration::IncrementalRegistration<PointT, Scalar>::Matrix4
-IncrementalRegistration<PointT, Scalar>::getAbsoluteTransform () const
+template <typename PointT, typename Scalar>
+inline typename pcl::registration::IncrementalRegistration<PointT, Scalar>::Matrix4
+IncrementalRegistration<PointT, Scalar>::getAbsoluteTransform() const
{
return (abs_transform_);
}
-template <typename PointT, typename Scalar> inline void
-IncrementalRegistration<PointT, Scalar>::reset ()
+template <typename PointT, typename Scalar>
+inline void
+IncrementalRegistration<PointT, Scalar>::reset()
{
- last_cloud_.reset ();
- delta_transform_ = abs_transform_ = Matrix4::Identity ();
+ last_cloud_.reset();
+ delta_transform_ = abs_transform_ = Matrix4::Identity();
}
-template <typename PointT, typename Scalar> inline void
-IncrementalRegistration<PointT, Scalar>::setRegistration (RegistrationPtr registration)
+template <typename PointT, typename Scalar>
+inline void
+IncrementalRegistration<PointT, Scalar>::setRegistration(RegistrationPtr registration)
{
registration_ = registration;
}
} // namespace pcl
#endif /*PCL_REGISTRATION_IMPL_INCREMENTAL_REGISTRATION_HPP_*/
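A hedged sketch of one way the class above can be driven, using an ICP backend; the correspondence distance is an assumed value.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/incremental_registration.h>
#include <vector>

void
incremental_demo(const std::vector<pcl::PointCloud<pcl::PointXYZ>::ConstPtr>& clouds)
{
  using ICP = pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ>;
  ICP::Ptr icp(new ICP);
  icp->setMaxCorrespondenceDistance(0.05); // assumed search radius

  pcl::registration::IncrementalRegistration<pcl::PointXYZ> ireg;
  ireg.setRegistration(icp);

  for (const auto& cloud : clouds) {
    // registers against the previously accepted cloud and accumulates the pose
    if (ireg.registerCloud(cloud)) {
      const Eigen::Matrix4f pose = ireg.getAbsoluteTransform();
      (void)pose; // pose of this cloud relative to the first cloud
    }
  }
}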
-
#ifndef PCL_REGISTRATION_IMPL_JOINT_ICP_HPP_
#define PCL_REGISTRATION_IMPL_JOINT_ICP_HPP_
-#include <pcl/registration/boost.h>
-#include <pcl/correspondence.h>
#include <pcl/console/print.h>
+#include <pcl/correspondence.h>
+namespace pcl {
-namespace pcl
-{
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-JointIterativeClosestPoint<PointSource, PointTarget, Scalar>::computeTransformation (
- PointCloudSource &output, const Matrix4 &guess)
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+JointIterativeClosestPoint<PointSource, PointTarget, Scalar>::computeTransformation(
+ PointCloudSource& output, const Matrix4& guess)
{
// Point clouds containing the correspondences of each point in <input, indices>
- if (sources_.size () != targets_.size () || sources_.empty () || targets_.empty ())
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] Must set InputSources and InputTargets to the same, nonzero size!\n",
- getClassName ().c_str ());
+ if (sources_.size() != targets_.size() || sources_.empty() || targets_.empty()) {
+ PCL_ERROR("[pcl::%s::computeTransformation] Must set InputSources and InputTargets "
+ "to the same, nonzero size!\n",
+ getClassName().c_str());
return;
}
bool manual_correspondence_estimations_set = true;
- if (correspondence_estimations_.empty ())
- {
+ if (correspondence_estimations_.empty()) {
manual_correspondence_estimations_set = false;
- correspondence_estimations_.resize (sources_.size ());
- for (std::size_t i = 0; i < sources_.size (); i++)
- {
- correspondence_estimations_[i] = correspondence_estimation_->clone ();
- KdTreeReciprocalPtr src_tree (new KdTreeReciprocal);
- KdTreePtr tgt_tree (new KdTree);
- correspondence_estimations_[i]->setSearchMethodTarget (tgt_tree);
- correspondence_estimations_[i]->setSearchMethodSource (src_tree);
+ correspondence_estimations_.resize(sources_.size());
+ for (std::size_t i = 0; i < sources_.size(); i++) {
+ correspondence_estimations_[i] = correspondence_estimation_->clone();
+ KdTreeReciprocalPtr src_tree(new KdTreeReciprocal);
+ KdTreePtr tgt_tree(new KdTree);
+ correspondence_estimations_[i]->setSearchMethodTarget(tgt_tree);
+ correspondence_estimations_[i]->setSearchMethodSource(src_tree);
}
}
- if (correspondence_estimations_.size () != sources_.size ())
- {
- PCL_ERROR ("[pcl::%s::computeTransform] Must set CorrespondenceEstimations to be the same size as the joint\n",
- getClassName ().c_str ());
+ if (correspondence_estimations_.size() != sources_.size()) {
+ PCL_ERROR("[pcl::%s::computeTransform] Must set CorrespondenceEstimations to be "
+ "the same size as the joint\n",
+ getClassName().c_str());
return;
}
- std::vector<PointCloudSourcePtr> inputs_transformed (sources_.size ());
- for (std::size_t i = 0; i < sources_.size (); i++)
- {
- inputs_transformed[i].reset (new PointCloudSource);
+ std::vector<PointCloudSourcePtr> inputs_transformed(sources_.size());
+ for (std::size_t i = 0; i < sources_.size(); i++) {
+ inputs_transformed[i].reset(new PointCloudSource);
}
nr_iterations_ = 0;
final_transformation_ = guess;
// Make a combined transformed input and output
- std::vector<std::size_t> input_offsets (sources_.size ());
- std::vector<std::size_t> target_offsets (targets_.size ());
- PointCloudSourcePtr sources_combined (new PointCloudSource);
- PointCloudSourcePtr inputs_transformed_combined (new PointCloudSource);
- PointCloudTargetPtr targets_combined (new PointCloudTarget);
+ std::vector<std::size_t> input_offsets(sources_.size());
+ std::vector<std::size_t> target_offsets(targets_.size());
+ PointCloudSourcePtr sources_combined(new PointCloudSource);
+ PointCloudSourcePtr inputs_transformed_combined(new PointCloudSource);
+ PointCloudTargetPtr targets_combined(new PointCloudTarget);
std::size_t input_offset = 0;
std::size_t target_offset = 0;
- for (std::size_t i = 0; i < sources_.size (); i++)
- {
+ for (std::size_t i = 0; i < sources_.size(); i++) {
// If the guessed transformation is non identity
- if (guess != Matrix4::Identity ())
- {
- // Apply guessed transformation prior to search for neighbours
- this->transformCloud (*sources_[i], *inputs_transformed[i], guess);
+ if (guess != Matrix4::Identity()) {
+ // Apply the guessed transformation prior to searching for neighbours
+ this->transformCloud(*sources_[i], *inputs_transformed[i], guess);
}
- else
- {
+ else {
*inputs_transformed[i] = *sources_[i];
}
*sources_combined += *sources_[i];
*targets_combined += *targets_[i];
input_offsets[i] = input_offset;
target_offsets[i] = target_offset;
- input_offset += inputs_transformed[i]->size ();
- target_offset += targets_[i]->size ();
+ input_offset += inputs_transformed[i]->size();
+ target_offset += targets_[i]->size();
}
- transformation_ = Matrix4::Identity ();
+ transformation_ = Matrix4::Identity();
// Make blobs if necessary
- determineRequiredBlobData ();
+ determineRequiredBlobData();
// Pass in the default target for the Correspondence Estimation/Rejection code
- for (std::size_t i = 0; i < sources_.size (); i++)
- {
- correspondence_estimations_[i]->setInputTarget (targets_[i]);
- if (correspondence_estimations_[i]->requiresTargetNormals ())
- {
- PCLPointCloud2::Ptr target_blob (new PCLPointCloud2);
- pcl::toPCLPointCloud2 (*targets_[i], *target_blob);
- correspondence_estimations_[i]->setTargetNormals (target_blob);
+ for (std::size_t i = 0; i < sources_.size(); i++) {
+ correspondence_estimations_[i]->setInputTarget(targets_[i]);
+ if (correspondence_estimations_[i]->requiresTargetNormals()) {
+ PCLPointCloud2::Ptr target_blob(new PCLPointCloud2);
+ pcl::toPCLPointCloud2(*targets_[i], *target_blob);
+ correspondence_estimations_[i]->setTargetNormals(target_blob);
}
}
- PCLPointCloud2::Ptr targets_combined_blob (new PCLPointCloud2);
- if (!correspondence_rejectors_.empty () && need_target_blob_)
- pcl::toPCLPointCloud2 (*targets_combined, *targets_combined_blob);
+ PCLPointCloud2::Ptr targets_combined_blob(new PCLPointCloud2);
+ if (!correspondence_rejectors_.empty() && need_target_blob_)
+ pcl::toPCLPointCloud2(*targets_combined, *targets_combined_blob);
- for (std::size_t i = 0; i < correspondence_rejectors_.size (); ++i)
- {
+ for (std::size_t i = 0; i < correspondence_rejectors_.size(); ++i) {
registration::CorrespondenceRejector::Ptr& rej = correspondence_rejectors_[i];
- if (rej->requiresTargetPoints ())
- rej->setTargetPoints (targets_combined_blob);
- if (rej->requiresTargetNormals () && target_has_normals_)
- rej->setTargetNormals (targets_combined_blob);
+ if (rej->requiresTargetPoints())
+ rej->setTargetPoints(targets_combined_blob);
+ if (rej->requiresTargetNormals() && target_has_normals_)
+ rej->setTargetNormals(targets_combined_blob);
}
- convergence_criteria_->setMaximumIterations (max_iterations_);
- convergence_criteria_->setRelativeMSE (euclidean_fitness_epsilon_);
- convergence_criteria_->setTranslationThreshold (transformation_epsilon_);
- convergence_criteria_->setRotationThreshold (1.0 - transformation_epsilon_);
+ convergence_criteria_->setMaximumIterations(max_iterations_);
+ convergence_criteria_->setRelativeMSE(euclidean_fitness_epsilon_);
+ convergence_criteria_->setTranslationThreshold(transformation_epsilon_);
+ convergence_criteria_->setRotationThreshold(1.0 - transformation_epsilon_);
// Repeat until convergence
- std::vector<CorrespondencesPtr> partial_correspondences_ (sources_.size ());
- for (std::size_t i = 0; i < sources_.size (); i++)
- {
- partial_correspondences_[i].reset (new pcl::Correspondences);
+ std::vector<CorrespondencesPtr> partial_correspondences_(sources_.size());
+ for (std::size_t i = 0; i < sources_.size(); i++) {
+ partial_correspondences_[i].reset(new pcl::Correspondences);
}
- do
- {
+ do {
// Save the previously estimated transformation
previous_transformation_ = transformation_;
// Set the source each iteration, to ensure the dirty flag is updated
- correspondences_->clear ();
- for (std::size_t i = 0; i < correspondence_estimations_.size (); i++)
- {
- correspondence_estimations_[i]->setInputSource (inputs_transformed[i]);
+ correspondences_->clear();
+ for (std::size_t i = 0; i < correspondence_estimations_.size(); i++) {
+ correspondence_estimations_[i]->setInputSource(inputs_transformed[i]);
// Get blob data if needed
- if (correspondence_estimations_[i]->requiresSourceNormals ())
- {
- PCLPointCloud2::Ptr input_transformed_blob (new PCLPointCloud2);
- toPCLPointCloud2 (*inputs_transformed[i], *input_transformed_blob);
- correspondence_estimations_[i]->setSourceNormals (input_transformed_blob);
+ if (correspondence_estimations_[i]->requiresSourceNormals()) {
+ PCLPointCloud2::Ptr input_transformed_blob(new PCLPointCloud2);
+ toPCLPointCloud2(*inputs_transformed[i], *input_transformed_blob);
+ correspondence_estimations_[i]->setSourceNormals(input_transformed_blob);
}
// Estimate correspondences on each cloud pair separately
- if (use_reciprocal_correspondence_)
- {
- correspondence_estimations_[i]->determineReciprocalCorrespondences (*partial_correspondences_[i], corr_dist_threshold_);
+ if (use_reciprocal_correspondence_) {
+ correspondence_estimations_[i]->determineReciprocalCorrespondences(
+ *partial_correspondences_[i], corr_dist_threshold_);
}
- else
- {
- correspondence_estimations_[i]->determineCorrespondences (*partial_correspondences_[i], corr_dist_threshold_);
+ else {
+ correspondence_estimations_[i]->determineCorrespondences(
+ *partial_correspondences_[i], corr_dist_threshold_);
}
- PCL_DEBUG ("[pcl::%s::computeTransformation] Found %d partial correspondences for cloud [%d]\n",
- getClassName ().c_str (),
- partial_correspondences_[i]->size (), i);
- for (std::size_t j = 0; j < partial_correspondences_[i]->size (); j++)
- {
- pcl::Correspondence corr = partial_correspondences_[i]->at (j);
+ PCL_DEBUG("[pcl::%s::computeTransformation] Found %d partial correspondences for "
+ "cloud [%d]\n",
+ getClassName().c_str(),
+ partial_correspondences_[i]->size(),
+ i);
+ for (std::size_t j = 0; j < partial_correspondences_[i]->size(); j++) {
+ pcl::Correspondence corr = partial_correspondences_[i]->at(j);
// Update the offsets to be for the combined clouds
corr.index_query += input_offsets[i];
corr.index_match += target_offsets[i];
- correspondences_->push_back (corr);
+ correspondences_->push_back(corr);
}
}
- PCL_DEBUG ("[pcl::%s::computeTransformation] Total correspondences: %d\n", getClassName ().c_str (), correspondences_->size ());
+ PCL_DEBUG("[pcl::%s::computeTransformation] Total correspondences: %d\n",
+ getClassName().c_str(),
+ correspondences_->size());
PCLPointCloud2::Ptr inputs_transformed_combined_blob;
- if (need_source_blob_)
- {
- inputs_transformed_combined_blob.reset (new PCLPointCloud2);
- toPCLPointCloud2 (*inputs_transformed_combined, *inputs_transformed_combined_blob);
+ if (need_source_blob_) {
+ inputs_transformed_combined_blob.reset(new PCLPointCloud2);
+ toPCLPointCloud2(*inputs_transformed_combined, *inputs_transformed_combined_blob);
}
- CorrespondencesPtr temp_correspondences (new Correspondences (*correspondences_));
- for (std::size_t i = 0; i < correspondence_rejectors_.size (); ++i)
- {
- PCL_DEBUG ("Applying a correspondence rejector method: %s.\n", correspondence_rejectors_[i]->getClassName ().c_str ());
+ CorrespondencesPtr temp_correspondences(new Correspondences(*correspondences_));
+ for (std::size_t i = 0; i < correspondence_rejectors_.size(); ++i) {
+ PCL_DEBUG("Applying a correspondence rejector method: %s.\n",
+ correspondence_rejectors_[i]->getClassName().c_str());
registration::CorrespondenceRejector::Ptr& rej = correspondence_rejectors_[i];
- PCL_DEBUG ("Applying a correspondence rejector method: %s.\n", rej->getClassName ().c_str ());
- if (rej->requiresSourcePoints ())
- rej->setSourcePoints (inputs_transformed_combined_blob);
- if (rej->requiresSourceNormals () && source_has_normals_)
- rej->setSourceNormals (inputs_transformed_combined_blob);
- rej->setInputCorrespondences (temp_correspondences);
- rej->getCorrespondences (*correspondences_);
+ PCL_DEBUG("Applying a correspondence rejector method: %s.\n",
+ rej->getClassName().c_str());
+ if (rej->requiresSourcePoints())
+ rej->setSourcePoints(inputs_transformed_combined_blob);
+ if (rej->requiresSourceNormals() && source_has_normals_)
+ rej->setSourceNormals(inputs_transformed_combined_blob);
+ rej->setInputCorrespondences(temp_correspondences);
+ rej->getCorrespondences(*correspondences_);
// Modify input for the next iteration
- if (i < correspondence_rejectors_.size () - 1)
+ if (i < correspondence_rejectors_.size() - 1)
*temp_correspondences = *correspondences_;
}
- int cnt = correspondences_->size ();
+ int cnt = correspondences_->size();
// Check whether we have enough correspondences
- if (cnt < min_number_correspondences_)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] Not enough correspondences found. Relax your threshold parameters.\n", getClassName ().c_str ());
- convergence_criteria_->setConvergenceState(pcl::registration::DefaultConvergenceCriteria<Scalar>::CONVERGENCE_CRITERIA_NO_CORRESPONDENCES);
+ if (cnt < min_number_correspondences_) {
+ PCL_ERROR("[pcl::%s::computeTransformation] Not enough correspondences found. "
+ "Relax your threshold parameters.\n",
+ getClassName().c_str());
+ convergence_criteria_->setConvergenceState(
+ pcl::registration::DefaultConvergenceCriteria<
+ Scalar>::CONVERGENCE_CRITERIA_NO_CORRESPONDENCES);
converged_ = false;
break;
}
// Estimate the transform jointly, on a combined correspondence set
- transformation_estimation_->estimateRigidTransformation (*inputs_transformed_combined, *targets_combined, *correspondences_, transformation_);
+ transformation_estimation_->estimateRigidTransformation(
+ *inputs_transformed_combined,
+ *targets_combined,
+ *correspondences_,
+ transformation_);
// Transform the combined data
- this->transformCloud (*inputs_transformed_combined, *inputs_transformed_combined, transformation_);
+ this->transformCloud(
+ *inputs_transformed_combined, *inputs_transformed_combined, transformation_);
// And all its components
- for (std::size_t i = 0; i < sources_.size (); i++)
- {
- this->transformCloud (*inputs_transformed[i], *inputs_transformed[i], transformation_);
+ for (std::size_t i = 0; i < sources_.size(); i++) {
+ this->transformCloud(
+ *inputs_transformed[i], *inputs_transformed[i], transformation_);
}
// Obtain the final transformation
++nr_iterations_;
     // Update the visualization of ICP convergence
- //if (update_visualizer_ != 0)
+ // if (update_visualizer_ != 0)
// update_visualizer_(output, source_indices_good, *target_, target_indices_good );
- converged_ = static_cast<bool> ((*convergence_criteria_));
- }
- while (!converged_);
-
- PCL_DEBUG ("Transformation is:\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n",
- final_transformation_ (0, 0), final_transformation_ (0, 1), final_transformation_ (0, 2), final_transformation_ (0, 3),
- final_transformation_ (1, 0), final_transformation_ (1, 1), final_transformation_ (1, 2), final_transformation_ (1, 3),
- final_transformation_ (2, 0), final_transformation_ (2, 1), final_transformation_ (2, 2), final_transformation_ (2, 3),
- final_transformation_ (3, 0), final_transformation_ (3, 1), final_transformation_ (3, 2), final_transformation_ (3, 3));
-
- // For fitness checks, etc, we'll use an aggregated cloud for now (should be evaluating independently for correctness, but this requires propagating a few virtual methods from Registration)
- IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputSource (sources_combined);
- IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputTarget (targets_combined);
+ converged_ = static_cast<bool>((*convergence_criteria_));
+ } while (!converged_);
+
+ PCL_DEBUG("Transformation "
+ "is:\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%"
+ "5f\t%5f\t%5f\t%5f\n",
+ final_transformation_(0, 0),
+ final_transformation_(0, 1),
+ final_transformation_(0, 2),
+ final_transformation_(0, 3),
+ final_transformation_(1, 0),
+ final_transformation_(1, 1),
+ final_transformation_(1, 2),
+ final_transformation_(1, 3),
+ final_transformation_(2, 0),
+ final_transformation_(2, 1),
+ final_transformation_(2, 2),
+ final_transformation_(2, 3),
+ final_transformation_(3, 0),
+ final_transformation_(3, 1),
+ final_transformation_(3, 2),
+ final_transformation_(3, 3));
+
+  // For fitness checks, etc., we'll use an aggregated cloud for now (should be
+ // evaluating independently for correctness, but this requires propagating a few
+ // virtual methods from Registration)
+ IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputSource(
+ sources_combined);
+ IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputTarget(
+ targets_combined);
// If we automatically set the correspondence estimators, we should clear them now
- if (!manual_correspondence_estimations_set)
- {
- correspondence_estimations_.clear ();
+ if (!manual_correspondence_estimations_set) {
+ correspondence_estimations_.clear();
}
-
- // By definition, this method will return an empty cloud (for compliance with the ICP API).
- // We can figure out a better solution, if necessary.
- output = PointCloudSource ();
+ // By definition, this method will return an empty cloud (for compliance with the ICP
+ // API). We can figure out a better solution, if necessary.
+ output = PointCloudSource();
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-JointIterativeClosestPoint<PointSource, PointTarget, Scalar>::determineRequiredBlobData ()
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+JointIterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ determineRequiredBlobData()
{
need_source_blob_ = false;
need_target_blob_ = false;
// Check estimators
- for (std::size_t i = 0; i < correspondence_estimations_.size (); i++)
- {
+ for (std::size_t i = 0; i < correspondence_estimations_.size(); i++) {
CorrespondenceEstimationPtr& ce = correspondence_estimations_[i];
- need_source_blob_ |= ce->requiresSourceNormals ();
- need_target_blob_ |= ce->requiresTargetNormals ();
+ need_source_blob_ |= ce->requiresSourceNormals();
+ need_target_blob_ |= ce->requiresTargetNormals();
// Add warnings if necessary
- if (ce->requiresSourceNormals () && !source_has_normals_)
- {
- PCL_WARN("[pcl::%s::determineRequiredBlobData] Estimator expects source normals, but we can't provide them.\n", getClassName ().c_str ());
+ if (ce->requiresSourceNormals() && !source_has_normals_) {
+ PCL_WARN("[pcl::%s::determineRequiredBlobData] Estimator expects source normals, "
+ "but we can't provide them.\n",
+ getClassName().c_str());
}
- if (ce->requiresTargetNormals () && !target_has_normals_)
- {
- PCL_WARN("[pcl::%s::determineRequiredBlobData] Estimator expects target normals, but we can't provide them.\n", getClassName ().c_str ());
+ if (ce->requiresTargetNormals() && !target_has_normals_) {
+ PCL_WARN("[pcl::%s::determineRequiredBlobData] Estimator expects target normals, "
+ "but we can't provide them.\n",
+ getClassName().c_str());
}
}
// Check rejectors
- for (std::size_t i = 0; i < correspondence_rejectors_.size (); i++)
- {
+ for (std::size_t i = 0; i < correspondence_rejectors_.size(); i++) {
registration::CorrespondenceRejector::Ptr& rej = correspondence_rejectors_[i];
- need_source_blob_ |= rej->requiresSourcePoints ();
- need_source_blob_ |= rej->requiresSourceNormals ();
- need_target_blob_ |= rej->requiresTargetPoints ();
- need_target_blob_ |= rej->requiresTargetNormals ();
- if (rej->requiresSourceNormals () && !source_has_normals_)
- {
- PCL_WARN("[pcl::%s::determineRequiredBlobData] Rejector %s expects source normals, but we can't provide them.\n", getClassName ().c_str (), rej->getClassName ().c_str ());
+ need_source_blob_ |= rej->requiresSourcePoints();
+ need_source_blob_ |= rej->requiresSourceNormals();
+ need_target_blob_ |= rej->requiresTargetPoints();
+ need_target_blob_ |= rej->requiresTargetNormals();
+ if (rej->requiresSourceNormals() && !source_has_normals_) {
+ PCL_WARN("[pcl::%s::determineRequiredBlobData] Rejector %s expects source "
+ "normals, but we can't provide them.\n",
+ getClassName().c_str(),
+ rej->getClassName().c_str());
}
- if (rej->requiresTargetNormals () && !target_has_normals_)
- {
- PCL_WARN("[pcl::%s::determineRequiredBlobData] Rejector %s expects target normals, but we can't provide them.\n", getClassName ().c_str (), rej->getClassName ().c_str ());
+ if (rej->requiresTargetNormals() && !target_has_normals_) {
+ PCL_WARN("[pcl::%s::determineRequiredBlobData] Rejector %s expects target "
+ "normals, but we can't provide them.\n",
+ getClassName().c_str(),
+ rej->getClassName().c_str());
}
}
}
} // namespace pcl
#endif /* PCL_REGISTRATION_IMPL_JOINT_ICP_HPP_ */
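A minimal usage sketch of the joint ICP variant implemented above, registering two source/target pairs under a single rigid transform. The addInputSource/addInputTarget calls and the header path pcl/registration/joint_icp.h are assumptions inferred from the sources_/targets_ members used in this file, and the parameter values are purely illustrative; as noted above, align() leaves its output argument empty.

#include <pcl/point_types.h>
#include <pcl/registration/joint_icp.h>

void
jointIcpSketch(const pcl::PointCloud<pcl::PointXYZ>::Ptr& src_a,
               const pcl::PointCloud<pcl::PointXYZ>::Ptr& tgt_a,
               const pcl::PointCloud<pcl::PointXYZ>::Ptr& src_b,
               const pcl::PointCloud<pcl::PointXYZ>::Ptr& tgt_b)
{
  pcl::JointIterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> jicp;
  // Each source is paired with the target added at the same position
  jicp.addInputSource(src_a);
  jicp.addInputTarget(tgt_a);
  jicp.addInputSource(src_b);
  jicp.addInputTarget(tgt_b);
  jicp.setMaxCorrespondenceDistance(0.05);
  jicp.setMaximumIterations(50);
  pcl::PointCloud<pcl::PointXYZ> unused; // left empty by computeTransformation()
  jicp.align(unused);
  if (jicp.hasConverged()) {
    const Eigen::Matrix4f joint_transform = jicp.getFinalTransformation();
    (void)joint_transform; // single transform estimated jointly over all pairs
  }
}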
-
#include <tuple>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template<typename PointT> inline void
-LUM<PointT>::setLoopGraph (const SLAMGraphPtr &slam_graph)
+template <typename PointT>
+inline void
+LUM<PointT>::setLoopGraph(const SLAMGraphPtr& slam_graph)
{
slam_graph_ = slam_graph;
}
-
-template<typename PointT> inline typename LUM<PointT>::SLAMGraphPtr
-LUM<PointT>::getLoopGraph () const
+template <typename PointT>
+inline typename LUM<PointT>::SLAMGraphPtr
+LUM<PointT>::getLoopGraph() const
{
return (slam_graph_);
}
-
-template<typename PointT> typename LUM<PointT>::SLAMGraph::vertices_size_type
-LUM<PointT>::getNumVertices () const
+template <typename PointT>
+typename LUM<PointT>::SLAMGraph::vertices_size_type
+LUM<PointT>::getNumVertices() const
{
- return (num_vertices (*slam_graph_));
+ return (num_vertices(*slam_graph_));
}
-
-template<typename PointT> void
-LUM<PointT>::setMaxIterations (int max_iterations)
+template <typename PointT>
+void
+LUM<PointT>::setMaxIterations(int max_iterations)
{
max_iterations_ = max_iterations;
}
-
-template<typename PointT> inline int
-LUM<PointT>::getMaxIterations () const
+template <typename PointT>
+inline int
+LUM<PointT>::getMaxIterations() const
{
return (max_iterations_);
}
-
-template<typename PointT> void
-LUM<PointT>::setConvergenceThreshold (float convergence_threshold)
+template <typename PointT>
+void
+LUM<PointT>::setConvergenceThreshold(float convergence_threshold)
{
convergence_threshold_ = convergence_threshold;
}
-
-template<typename PointT> inline float
-LUM<PointT>::getConvergenceThreshold () const
+template <typename PointT>
+inline float
+LUM<PointT>::getConvergenceThreshold() const
{
return (convergence_threshold_);
}
-
-template<typename PointT> typename LUM<PointT>::Vertex
-LUM<PointT>::addPointCloud (const PointCloudPtr &cloud, const Eigen::Vector6f &pose)
+template <typename PointT>
+typename LUM<PointT>::Vertex
+LUM<PointT>::addPointCloud(const PointCloudPtr& cloud, const Eigen::Vector6f& pose)
{
- Vertex v = add_vertex (*slam_graph_);
+ Vertex v = add_vertex(*slam_graph_);
(*slam_graph_)[v].cloud_ = cloud;
- if (v == 0 && pose != Eigen::Vector6f::Zero ())
- {
- PCL_WARN("[pcl::registration::LUM::addPointCloud] The pose estimate is ignored for the first cloud in the graph since that will become the reference pose.\n");
- (*slam_graph_)[v].pose_ = Eigen::Vector6f::Zero ();
+ if (v == 0 && pose != Eigen::Vector6f::Zero()) {
+ PCL_WARN(
+ "[pcl::registration::LUM::addPointCloud] The pose estimate is ignored for the "
+ "first cloud in the graph since that will become the reference pose.\n");
+ (*slam_graph_)[v].pose_ = Eigen::Vector6f::Zero();
return (v);
}
(*slam_graph_)[v].pose_ = pose;
return (v);
}
-
-template<typename PointT> inline void
-LUM<PointT>::setPointCloud (const Vertex &vertex, const PointCloudPtr &cloud)
+template <typename PointT>
+inline void
+LUM<PointT>::setPointCloud(const Vertex& vertex, const PointCloudPtr& cloud)
{
- if (vertex >= getNumVertices ())
- {
- PCL_ERROR("[pcl::registration::LUM::setPointCloud] You are attempting to set a point cloud to a non-existing graph vertex.\n");
+ if (vertex >= getNumVertices()) {
+ PCL_ERROR("[pcl::registration::LUM::setPointCloud] You are attempting to set a "
+ "point cloud to a non-existing graph vertex.\n");
return;
}
(*slam_graph_)[vertex].cloud_ = cloud;
}
-
-template<typename PointT> inline typename LUM<PointT>::PointCloudPtr
-LUM<PointT>::getPointCloud (const Vertex &vertex) const
+template <typename PointT>
+inline typename LUM<PointT>::PointCloudPtr
+LUM<PointT>::getPointCloud(const Vertex& vertex) const
{
- if (vertex >= getNumVertices ())
- {
- PCL_ERROR("[pcl::registration::LUM::getPointCloud] You are attempting to get a point cloud from a non-existing graph vertex.\n");
- return (PointCloudPtr ());
+ if (vertex >= getNumVertices()) {
+ PCL_ERROR("[pcl::registration::LUM::getPointCloud] You are attempting to get a "
+ "point cloud from a non-existing graph vertex.\n");
+ return (PointCloudPtr());
}
return ((*slam_graph_)[vertex].cloud_);
}
-
-template<typename PointT> inline void
-LUM<PointT>::setPose (const Vertex &vertex, const Eigen::Vector6f &pose)
+template <typename PointT>
+inline void
+LUM<PointT>::setPose(const Vertex& vertex, const Eigen::Vector6f& pose)
{
- if (vertex >= getNumVertices ())
- {
- PCL_ERROR("[pcl::registration::LUM::setPose] You are attempting to set a pose estimate to a non-existing graph vertex.\n");
+ if (vertex >= getNumVertices()) {
+ PCL_ERROR("[pcl::registration::LUM::setPose] You are attempting to set a pose "
+ "estimate to a non-existing graph vertex.\n");
return;
}
- if (vertex == 0)
- {
- PCL_ERROR("[pcl::registration::LUM::setPose] The pose estimate is ignored for the first cloud in the graph since that will become the reference pose.\n");
+ if (vertex == 0) {
+ PCL_ERROR("[pcl::registration::LUM::setPose] The pose estimate is ignored for the "
+ "first cloud in the graph since that will become the reference pose.\n");
return;
}
(*slam_graph_)[vertex].pose_ = pose;
}
-
-template<typename PointT> inline Eigen::Vector6f
-LUM<PointT>::getPose (const Vertex &vertex) const
+template <typename PointT>
+inline Eigen::Vector6f
+LUM<PointT>::getPose(const Vertex& vertex) const
{
- if (vertex >= getNumVertices ())
- {
- PCL_ERROR("[pcl::registration::LUM::getPose] You are attempting to get a pose estimate from a non-existing graph vertex.\n");
- return (Eigen::Vector6f::Zero ());
+ if (vertex >= getNumVertices()) {
+ PCL_ERROR("[pcl::registration::LUM::getPose] You are attempting to get a pose "
+ "estimate from a non-existing graph vertex.\n");
+ return (Eigen::Vector6f::Zero());
}
return ((*slam_graph_)[vertex].pose_);
}
-
-template<typename PointT> inline Eigen::Affine3f
-LUM<PointT>::getTransformation (const Vertex &vertex) const
+template <typename PointT>
+inline Eigen::Affine3f
+LUM<PointT>::getTransformation(const Vertex& vertex) const
{
- Eigen::Vector6f pose = getPose (vertex);
- return (pcl::getTransformation (pose (0), pose (1), pose (2), pose (3), pose (4), pose (5)));
+ Eigen::Vector6f pose = getPose(vertex);
+ return (pcl::getTransformation(pose(0), pose(1), pose(2), pose(3), pose(4), pose(5)));
}
-
-template<typename PointT> void
-LUM<PointT>::setCorrespondences (const Vertex &source_vertex, const Vertex &target_vertex, const pcl::CorrespondencesPtr &corrs)
+template <typename PointT>
+void
+LUM<PointT>::setCorrespondences(const Vertex& source_vertex,
+ const Vertex& target_vertex,
+ const pcl::CorrespondencesPtr& corrs)
{
- if (source_vertex >= getNumVertices () || target_vertex >= getNumVertices () || source_vertex == target_vertex)
- {
- PCL_ERROR("[pcl::registration::LUM::setCorrespondences] You are attempting to set a set of correspondences between non-existing or identical graph vertices.\n");
+ if (source_vertex >= getNumVertices() || target_vertex >= getNumVertices() ||
+ source_vertex == target_vertex) {
+ PCL_ERROR(
+ "[pcl::registration::LUM::setCorrespondences] You are attempting to set a set "
+ "of correspondences between non-existing or identical graph vertices.\n");
return;
}
Edge e;
bool present;
- std::tie (e, present) = edge (source_vertex, target_vertex, *slam_graph_);
+ std::tie(e, present) = edge(source_vertex, target_vertex, *slam_graph_);
if (!present)
- std::tie (e, present) = add_edge (source_vertex, target_vertex, *slam_graph_);
+ std::tie(e, present) = add_edge(source_vertex, target_vertex, *slam_graph_);
(*slam_graph_)[e].corrs_ = corrs;
}
-
-template<typename PointT> inline pcl::CorrespondencesPtr
-LUM<PointT>::getCorrespondences (const Vertex &source_vertex, const Vertex &target_vertex) const
+template <typename PointT>
+inline pcl::CorrespondencesPtr
+LUM<PointT>::getCorrespondences(const Vertex& source_vertex,
+ const Vertex& target_vertex) const
{
- if (source_vertex >= getNumVertices () || target_vertex >= getNumVertices ())
- {
- PCL_ERROR("[pcl::registration::LUM::getCorrespondences] You are attempting to get a set of correspondences between non-existing graph vertices.\n");
- return (pcl::CorrespondencesPtr ());
+ if (source_vertex >= getNumVertices() || target_vertex >= getNumVertices()) {
+ PCL_ERROR("[pcl::registration::LUM::getCorrespondences] You are attempting to get "
+ "a set of correspondences between non-existing graph vertices.\n");
+ return (pcl::CorrespondencesPtr());
}
Edge e;
bool present;
- std::tie (e, present) = edge (source_vertex, target_vertex, *slam_graph_);
- if (!present)
- {
- PCL_ERROR("[pcl::registration::LUM::getCorrespondences] You are attempting to get a set of correspondences from a non-existing graph edge.\n");
- return (pcl::CorrespondencesPtr ());
+ std::tie(e, present) = edge(source_vertex, target_vertex, *slam_graph_);
+ if (!present) {
+ PCL_ERROR("[pcl::registration::LUM::getCorrespondences] You are attempting to get "
+ "a set of correspondences from a non-existing graph edge.\n");
+ return (pcl::CorrespondencesPtr());
}
return ((*slam_graph_)[e].corrs_);
}
-
-template<typename PointT> void
-LUM<PointT>::compute ()
+template <typename PointT>
+void
+LUM<PointT>::compute()
{
- int n = static_cast<int> (getNumVertices ());
- if (n < 2)
- {
- PCL_ERROR("[pcl::registration::LUM::compute] The slam graph needs at least 2 vertices.\n");
+ int n = static_cast<int>(getNumVertices());
+ if (n < 2) {
+ PCL_ERROR("[pcl::registration::LUM::compute] The slam graph needs at least 2 "
+ "vertices.\n");
return;
}
- for (int i = 0; i < max_iterations_; ++i)
- {
- // Linearized computation of C^-1 and C^-1*D and convergence checking for all edges in the graph (results stored in slam_graph_)
+ for (int i = 0; i < max_iterations_; ++i) {
+ // Linearized computation of C^-1 and C^-1*D and convergence checking for all edges
+ // in the graph (results stored in slam_graph_)
typename SLAMGraph::edge_iterator e, e_end;
- for (std::tie (e, e_end) = edges (*slam_graph_); e != e_end; ++e)
- computeEdge (*e);
+ for (std::tie(e, e_end) = edges(*slam_graph_); e != e_end; ++e)
+ computeEdge(*e);
// Declare matrices G and B
- Eigen::MatrixXf G = Eigen::MatrixXf::Zero (6 * (n - 1), 6 * (n - 1));
- Eigen::VectorXf B = Eigen::VectorXf::Zero (6 * (n - 1));
+ Eigen::MatrixXf G = Eigen::MatrixXf::Zero(6 * (n - 1), 6 * (n - 1));
+ Eigen::VectorXf B = Eigen::VectorXf::Zero(6 * (n - 1));
// Start at 1 because 0 is the reference pose
- for (int vi = 1; vi != n; ++vi)
- {
- for (int vj = 0; vj != n; ++vj)
- {
- // Attempt to use the forward edge, otherwise use backward edge, otherwise there was no edge
+ for (int vi = 1; vi != n; ++vi) {
+ for (int vj = 0; vj != n; ++vj) {
+ // Attempt to use the forward edge, otherwise use backward edge, otherwise there
+ // was no edge
Edge e;
bool present1;
- std::tie (e, present1) = edge (vi, vj, *slam_graph_);
- if (!present1)
- {
+ std::tie(e, present1) = edge(vi, vj, *slam_graph_);
+ if (!present1) {
bool present2;
- std::tie (e, present2) = edge (vj, vi, *slam_graph_);
+ std::tie(e, present2) = edge(vj, vi, *slam_graph_);
if (!present2)
continue;
}
// Fill in elements of G and B
if (vj > 0)
- G.block (6 * (vi - 1), 6 * (vj - 1), 6, 6) = -(*slam_graph_)[e].cinv_;
- G.block (6 * (vi - 1), 6 * (vi - 1), 6, 6) += (*slam_graph_)[e].cinv_;
- B.segment (6 * (vi - 1), 6) += (present1 ? 1 : -1) * (*slam_graph_)[e].cinvd_;
+ G.block(6 * (vi - 1), 6 * (vj - 1), 6, 6) = -(*slam_graph_)[e].cinv_;
+ G.block(6 * (vi - 1), 6 * (vi - 1), 6, 6) += (*slam_graph_)[e].cinv_;
+ B.segment(6 * (vi - 1), 6) += (present1 ? 1 : -1) * (*slam_graph_)[e].cinvd_;
}
}
// Computation of the linear equation system: GX = B
- // TODO investigate accuracy vs. speed tradeoff and find the best solving method for our type of linear equation (sparse)
- Eigen::VectorXf X = G.colPivHouseholderQr ().solve (B);
+ // TODO investigate accuracy vs. speed tradeoff and find the best solving method for
+ // our type of linear equation (sparse)
+ Eigen::VectorXf X = G.colPivHouseholderQr().solve(B);
// Update the poses
float sum = 0.0;
- for (int vi = 1; vi != n; ++vi)
- {
- Eigen::Vector6f difference_pose = static_cast<Eigen::Vector6f> (-incidenceCorrection (getPose (vi)).inverse () * X.segment (6 * (vi - 1), 6));
- sum += difference_pose.norm ();
- setPose (vi, getPose (vi) + difference_pose);
+ for (int vi = 1; vi != n; ++vi) {
+ Eigen::Vector6f difference_pose = static_cast<Eigen::Vector6f>(
+ -incidenceCorrection(getPose(vi)).inverse() * X.segment(6 * (vi - 1), 6));
+ sum += difference_pose.norm();
+ setPose(vi, getPose(vi) + difference_pose);
}
// Convergence check
- if (sum <= convergence_threshold_ * static_cast<float> (n - 1))
+ if (sum <= convergence_threshold_ * static_cast<float>(n - 1))
return;
}
}
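For reference, the per-iteration update performed by compute() above can be restated compactly (a sketch in the code's own symbols, writing X_i for the 6-row segment of X belonging to vertex i and M(p_i) for incidenceCorrection(p_i)):

  \[ G X = B, \qquad \Delta p_i = -\,M(p_i)^{-1} X_i, \qquad p_i \leftarrow p_i + \Delta p_i, \quad i = 1, \dots, n-1, \]

with early termination once \( \sum_{i=1}^{n-1} \lVert \Delta p_i \rVert \le \epsilon\,(n-1) \), where \(\epsilon\) is convergence_threshold_ and vertex 0 stays fixed as the reference pose.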
-
-template<typename PointT> typename LUM<PointT>::PointCloudPtr
-LUM<PointT>::getTransformedCloud (const Vertex &vertex) const
+template <typename PointT>
+typename LUM<PointT>::PointCloudPtr
+LUM<PointT>::getTransformedCloud(const Vertex& vertex) const
{
- PointCloudPtr out (new PointCloud);
- pcl::transformPointCloud (*getPointCloud (vertex), *out, getTransformation (vertex));
+ PointCloudPtr out(new PointCloud);
+ pcl::transformPointCloud(*getPointCloud(vertex), *out, getTransformation(vertex));
return (out);
}
-
-template<typename PointT> typename LUM<PointT>::PointCloudPtr
-LUM<PointT>::getConcatenatedCloud () const
+template <typename PointT>
+typename LUM<PointT>::PointCloudPtr
+LUM<PointT>::getConcatenatedCloud() const
{
- PointCloudPtr out (new PointCloud);
+ PointCloudPtr out(new PointCloud);
typename SLAMGraph::vertex_iterator v, v_end;
- for (std::tie (v, v_end) = vertices (*slam_graph_); v != v_end; ++v)
- {
+ for (std::tie(v, v_end) = vertices(*slam_graph_); v != v_end; ++v) {
PointCloud temp;
- pcl::transformPointCloud (*getPointCloud (*v), temp, getTransformation (*v));
+ pcl::transformPointCloud(*getPointCloud(*v), temp, getTransformation(*v));
*out += temp;
}
return (out);
}
-
-template<typename PointT> void
-LUM<PointT>::computeEdge (const Edge &e)
+template <typename PointT>
+void
+LUM<PointT>::computeEdge(const Edge& e)
{
// Get necessary local data from graph
- PointCloudPtr source_cloud = (*slam_graph_)[source (e, *slam_graph_)].cloud_;
- PointCloudPtr target_cloud = (*slam_graph_)[target (e, *slam_graph_)].cloud_;
- Eigen::Vector6f source_pose = (*slam_graph_)[source (e, *slam_graph_)].pose_;
- Eigen::Vector6f target_pose = (*slam_graph_)[target (e, *slam_graph_)].pose_;
+ PointCloudPtr source_cloud = (*slam_graph_)[source(e, *slam_graph_)].cloud_;
+ PointCloudPtr target_cloud = (*slam_graph_)[target(e, *slam_graph_)].cloud_;
+ Eigen::Vector6f source_pose = (*slam_graph_)[source(e, *slam_graph_)].pose_;
+ Eigen::Vector6f target_pose = (*slam_graph_)[target(e, *slam_graph_)].pose_;
pcl::CorrespondencesPtr corrs = (*slam_graph_)[e].corrs_;
// Build the average and difference vectors for all correspondences
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > corrs_aver (corrs->size ());
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > corrs_diff (corrs->size ());
- int oci = 0; // oci = output correspondence iterator
- for (int ici = 0; ici != static_cast<int> (corrs->size ()); ++ici) // ici = input correspondence iterator
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>> corrs_aver(
+ corrs->size());
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>> corrs_diff(
+ corrs->size());
+ int oci = 0; // oci = output correspondence iterator
+ for (const auto& icorr : (*corrs)) // icorr = input correspondence
{
// Compound the point pair onto the current pose
- Eigen::Vector3f source_compounded = pcl::getTransformation (source_pose (0), source_pose (1), source_pose (2), source_pose (3), source_pose (4), source_pose (5)) * (*source_cloud)[(*corrs)[ici].index_query].getVector3fMap ();
- Eigen::Vector3f target_compounded = pcl::getTransformation (target_pose (0), target_pose (1), target_pose (2), target_pose (3), target_pose (4), target_pose (5)) * (*target_cloud)[(*corrs)[ici].index_match].getVector3fMap ();
+ Eigen::Vector3f source_compounded =
+ pcl::getTransformation(source_pose(0),
+ source_pose(1),
+ source_pose(2),
+ source_pose(3),
+ source_pose(4),
+ source_pose(5)) *
+ (*source_cloud)[icorr.index_query].getVector3fMap();
+ Eigen::Vector3f target_compounded =
+ pcl::getTransformation(target_pose(0),
+ target_pose(1),
+ target_pose(2),
+ target_pose(3),
+ target_pose(4),
+ target_pose(5)) *
+ (*target_cloud)[icorr.index_match].getVector3fMap();
// NaN points can not be passed to the remaining computational pipeline
- if (!std::isfinite (source_compounded (0)) || !std::isfinite (source_compounded (1)) || !std::isfinite (source_compounded (2)) || !std::isfinite (target_compounded (0)) || !std::isfinite (target_compounded (1)) || !std::isfinite (target_compounded (2)))
+ if (!std::isfinite(source_compounded(0)) || !std::isfinite(source_compounded(1)) ||
+ !std::isfinite(source_compounded(2)) || !std::isfinite(target_compounded(0)) ||
+ !std::isfinite(target_compounded(1)) || !std::isfinite(target_compounded(2)))
continue;
// Compute the point pair average and difference and store for later use
     corrs_aver[oci] = 0.5f * (source_compounded + target_compounded);
     corrs_diff[oci] = source_compounded - target_compounded;
oci++;
}
- corrs_aver.resize (oci);
- corrs_diff.resize (oci);
+ corrs_aver.resize(oci);
+ corrs_diff.resize(oci);
// Need enough valid correspondences to get a good triangulation
- if (oci < 3)
- {
- PCL_WARN("[pcl::registration::LUM::compute] The correspondences between vertex %d and %d do not contain enough valid correspondences to be considered for computation.\n", source (e, *slam_graph_), target (e, *slam_graph_));
- (*slam_graph_)[e].cinv_ = Eigen::Matrix6f::Zero ();
- (*slam_graph_)[e].cinvd_ = Eigen::Vector6f::Zero ();
+ if (oci < 3) {
+ PCL_WARN("[pcl::registration::LUM::compute] The correspondences between vertex %d "
+ "and %d do not contain enough valid correspondences to be considered for "
+ "computation.\n",
+ source(e, *slam_graph_),
+ target(e, *slam_graph_));
+ (*slam_graph_)[e].cinv_ = Eigen::Matrix6f::Zero();
+ (*slam_graph_)[e].cinvd_ = Eigen::Vector6f::Zero();
return;
}
// Build the matrices M'M and M'Z
- Eigen::Matrix6f MM = Eigen::Matrix6f::Zero ();
- Eigen::Vector6f MZ = Eigen::Vector6f::Zero ();
- for (int ci = 0; ci != oci; ++ci) // ci = correspondence iterator
+ Eigen::Matrix6f MM = Eigen::Matrix6f::Zero();
+ Eigen::Vector6f MZ = Eigen::Vector6f::Zero();
+ for (int ci = 0; ci != oci; ++ci) // ci = correspondence iterator
{
// Fast computation of summation elements of M'M
- MM (0, 4) -= corrs_aver[ci] (1);
- MM (0, 5) += corrs_aver[ci] (2);
- MM (1, 3) -= corrs_aver[ci] (2);
- MM (1, 4) += corrs_aver[ci] (0);
- MM (2, 3) += corrs_aver[ci] (1);
- MM (2, 5) -= corrs_aver[ci] (0);
- MM (3, 4) -= corrs_aver[ci] (0) * corrs_aver[ci] (2);
- MM (3, 5) -= corrs_aver[ci] (0) * corrs_aver[ci] (1);
- MM (4, 5) -= corrs_aver[ci] (1) * corrs_aver[ci] (2);
- MM (3, 3) += corrs_aver[ci] (1) * corrs_aver[ci] (1) + corrs_aver[ci] (2) * corrs_aver[ci] (2);
- MM (4, 4) += corrs_aver[ci] (0) * corrs_aver[ci] (0) + corrs_aver[ci] (1) * corrs_aver[ci] (1);
- MM (5, 5) += corrs_aver[ci] (0) * corrs_aver[ci] (0) + corrs_aver[ci] (2) * corrs_aver[ci] (2);
+ MM(0, 4) -= corrs_aver[ci](1);
+ MM(0, 5) += corrs_aver[ci](2);
+ MM(1, 3) -= corrs_aver[ci](2);
+ MM(1, 4) += corrs_aver[ci](0);
+ MM(2, 3) += corrs_aver[ci](1);
+ MM(2, 5) -= corrs_aver[ci](0);
+ MM(3, 4) -= corrs_aver[ci](0) * corrs_aver[ci](2);
+ MM(3, 5) -= corrs_aver[ci](0) * corrs_aver[ci](1);
+ MM(4, 5) -= corrs_aver[ci](1) * corrs_aver[ci](2);
+ MM(3, 3) +=
+ corrs_aver[ci](1) * corrs_aver[ci](1) + corrs_aver[ci](2) * corrs_aver[ci](2);
+ MM(4, 4) +=
+ corrs_aver[ci](0) * corrs_aver[ci](0) + corrs_aver[ci](1) * corrs_aver[ci](1);
+ MM(5, 5) +=
+ corrs_aver[ci](0) * corrs_aver[ci](0) + corrs_aver[ci](2) * corrs_aver[ci](2);
// Fast computation of M'Z
- MZ (0) += corrs_diff[ci] (0);
- MZ (1) += corrs_diff[ci] (1);
- MZ (2) += corrs_diff[ci] (2);
- MZ (3) += corrs_aver[ci] (1) * corrs_diff[ci] (2) - corrs_aver[ci] (2) * corrs_diff[ci] (1);
- MZ (4) += corrs_aver[ci] (0) * corrs_diff[ci] (1) - corrs_aver[ci] (1) * corrs_diff[ci] (0);
- MZ (5) += corrs_aver[ci] (2) * corrs_diff[ci] (0) - corrs_aver[ci] (0) * corrs_diff[ci] (2);
+ MZ(0) += corrs_diff[ci](0);
+ MZ(1) += corrs_diff[ci](1);
+ MZ(2) += corrs_diff[ci](2);
+ MZ(3) +=
+ corrs_aver[ci](1) * corrs_diff[ci](2) - corrs_aver[ci](2) * corrs_diff[ci](1);
+ MZ(4) +=
+ corrs_aver[ci](0) * corrs_diff[ci](1) - corrs_aver[ci](1) * corrs_diff[ci](0);
+ MZ(5) +=
+ corrs_aver[ci](2) * corrs_diff[ci](0) - corrs_aver[ci](0) * corrs_diff[ci](2);
}
// Remaining elements of M'M
- MM (0, 0) = MM (1, 1) = MM (2, 2) = static_cast<float> (oci);
- MM (4, 0) = MM (0, 4);
- MM (5, 0) = MM (0, 5);
- MM (3, 1) = MM (1, 3);
- MM (4, 1) = MM (1, 4);
- MM (3, 2) = MM (2, 3);
- MM (5, 2) = MM (2, 5);
- MM (4, 3) = MM (3, 4);
- MM (5, 3) = MM (3, 5);
- MM (5, 4) = MM (4, 5);
+ MM(0, 0) = MM(1, 1) = MM(2, 2) = static_cast<float>(oci);
+ MM(4, 0) = MM(0, 4);
+ MM(5, 0) = MM(0, 5);
+ MM(3, 1) = MM(1, 3);
+ MM(4, 1) = MM(1, 4);
+ MM(3, 2) = MM(2, 3);
+ MM(5, 2) = MM(2, 5);
+ MM(4, 3) = MM(3, 4);
+ MM(5, 3) = MM(3, 5);
+ MM(5, 4) = MM(4, 5);
// Compute pose difference estimation
- Eigen::Vector6f D = static_cast<Eigen::Vector6f> (MM.inverse () * MZ);
+ Eigen::Vector6f D = static_cast<Eigen::Vector6f>(MM.inverse() * MZ);
// Compute s^2
float ss = 0.0f;
- for (int ci = 0; ci != oci; ++ci) // ci = correspondence iterator
- ss += static_cast<float> (std::pow (corrs_diff[ci] (0) - (D (0) + corrs_aver[ci] (2) * D (5) - corrs_aver[ci] (1) * D (4)), 2.0f)
- + std::pow (corrs_diff[ci] (1) - (D (1) + corrs_aver[ci] (0) * D (4) - corrs_aver[ci] (2) * D (3)), 2.0f)
- + std::pow (corrs_diff[ci] (2) - (D (2) + corrs_aver[ci] (1) * D (3) - corrs_aver[ci] (0) * D (5)), 2.0f));
+ for (int ci = 0; ci != oci; ++ci) // ci = correspondence iterator
+ ss += static_cast<float>(
+ std::pow(corrs_diff[ci](0) -
+ (D(0) + corrs_aver[ci](2) * D(5) - corrs_aver[ci](1) * D(4)),
+ 2.0f) +
+ std::pow(corrs_diff[ci](1) -
+ (D(1) + corrs_aver[ci](0) * D(4) - corrs_aver[ci](2) * D(3)),
+ 2.0f) +
+ std::pow(corrs_diff[ci](2) -
+ (D(2) + corrs_aver[ci](1) * D(3) - corrs_aver[ci](0) * D(5)),
+ 2.0f));
// When reaching the limitations of computation due to linearization
- if (ss < 0.0000000000001 || !std::isfinite (ss))
- {
- (*slam_graph_)[e].cinv_ = Eigen::Matrix6f::Zero ();
- (*slam_graph_)[e].cinvd_ = Eigen::Vector6f::Zero ();
+ if (ss < 0.0000000000001 || !std::isfinite(ss)) {
+ (*slam_graph_)[e].cinv_ = Eigen::Matrix6f::Zero();
+ (*slam_graph_)[e].cinvd_ = Eigen::Vector6f::Zero();
return;
}
   // Store the results in the slam graph
   (*slam_graph_)[e].cinv_ = MM * (1.0f / ss);
   (*slam_graph_)[e].cinvd_ = MZ * (1.0f / ss);
}
-
-template<typename PointT> inline Eigen::Matrix6f
-LUM<PointT>::incidenceCorrection (const Eigen::Vector6f &pose)
+template <typename PointT>
+inline Eigen::Matrix6f
+LUM<PointT>::incidenceCorrection(const Eigen::Vector6f& pose)
{
- Eigen::Matrix6f out = Eigen::Matrix6f::Identity ();
- float cx = std::cos (pose (3)), sx = sinf (pose (3)), cy = std::cos (pose (4)), sy = sinf (pose (4));
- out (0, 4) = pose (1) * sx - pose (2) * cx;
- out (0, 5) = pose (1) * cx * cy + pose (2) * sx * cy;
- out (1, 3) = pose (2);
- out (1, 4) = -pose (0) * sx;
- out (1, 5) = -pose (0) * cx * cy + pose (2) * sy;
- out (2, 3) = -pose (1);
- out (2, 4) = pose (0) * cx;
- out (2, 5) = -pose (0) * sx * cy - pose (1) * sy;
- out (3, 5) = sy;
- out (4, 4) = sx;
- out (4, 5) = cx * cy;
- out (5, 4) = cx;
- out (5, 5) = -sx * cy;
+ Eigen::Matrix6f out = Eigen::Matrix6f::Identity();
+ float cx = std::cos(pose(3)), sx = sinf(pose(3)), cy = std::cos(pose(4)),
+ sy = sinf(pose(4));
+ out(0, 4) = pose(1) * sx - pose(2) * cx;
+ out(0, 5) = pose(1) * cx * cy + pose(2) * sx * cy;
+ out(1, 3) = pose(2);
+ out(1, 4) = -pose(0) * sx;
+ out(1, 5) = -pose(0) * cx * cy + pose(2) * sy;
+ out(2, 3) = -pose(1);
+ out(2, 4) = pose(0) * cx;
+ out(2, 5) = -pose(0) * sx * cy - pose(1) * sy;
+ out(3, 5) = sy;
+ out(4, 4) = sx;
+ out(4, 5) = cx * cy;
+ out(5, 4) = cx;
+ out(5, 5) = -sx * cy;
return (out);
}
#define PCL_INSTANTIATE_LUM(T) template class PCL_EXPORTS pcl::registration::LUM<T>;
-#endif // PCL_REGISTRATION_IMPL_LUM_HPP_
-
+#endif // PCL_REGISTRATION_IMPL_LUM_HPP_
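A minimal usage sketch of the LUM graph optimization implemented above, with two clouds and one edge. The header path pcl/registration/lum.h and the way the correspondences are obtained are assumptions; everything else uses only the methods defined in this file.

#include <pcl/point_types.h>
#include <pcl/registration/lum.h>

void
lumSketch(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud_a,
          const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud_b,
          const pcl::CorrespondencesPtr& corrs_b_to_a)
{
  pcl::registration::LUM<pcl::PointXYZ> lum;
  // Vertex 0 becomes the reference pose; any pose estimate passed for it is ignored
  const auto v_a = lum.addPointCloud(cloud_a, Eigen::Vector6f::Zero());
  Eigen::Vector6f guess_b = Eigen::Vector6f::Zero();
  guess_b(0) = 0.1f; // rough initial x-translation of cloud_b (illustrative)
  const auto v_b = lum.addPointCloud(cloud_b, guess_b);
  // One edge of the SLAM graph: correspondences from source vertex v_b to target v_a
  lum.setCorrespondences(v_b, v_a, corrs_b_to_a);
  lum.setMaxIterations(5);
  lum.setConvergenceThreshold(0.0f);
  lum.compute();
  const Eigen::Affine3f pose_b = lum.getTransformation(v_b); // refined pose of cloud_b
  const auto merged = lum.getConcatenatedCloud();            // all clouds in one frame
  (void)pose_b;
  (void)merged;
}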
#ifndef PCL_REGISTRATION_IMPL_META_REGISTRATION_HPP_
#define PCL_REGISTRATION_IMPL_META_REGISTRATION_HPP_
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
template <typename PointT, typename Scalar>
-MetaRegistration<PointT, Scalar>::MetaRegistration () :
- abs_transform_ (Matrix4::Identity ())
+MetaRegistration<PointT, Scalar>::MetaRegistration()
+: abs_transform_(Matrix4::Identity())
{}
-template <typename PointT, typename Scalar> bool
-MetaRegistration<PointT, Scalar>::registerCloud (const PointCloudConstPtr& new_cloud, const Matrix4& delta_estimate)
+template <typename PointT, typename Scalar>
+bool
+MetaRegistration<PointT, Scalar>::registerCloud(const PointCloudConstPtr& new_cloud,
+ const Matrix4& delta_estimate)
{
- assert (registration_);
+ assert(registration_);
- PointCloudPtr new_cloud_transformed (new pcl::PointCloud<PointT> ());
+ PointCloudPtr new_cloud_transformed(new pcl::PointCloud<PointT>());
- if (!full_cloud_)
- {
+ if (!full_cloud_) {
pcl::transformPointCloud(*new_cloud, *new_cloud_transformed, delta_estimate);
full_cloud_ = new_cloud_transformed;
abs_transform_ = delta_estimate;
return (true);
}
- registration_->setInputSource (new_cloud);
- registration_->setInputTarget (full_cloud_);
+ registration_->setInputSource(new_cloud);
+ registration_->setInputTarget(full_cloud_);
- registration_->align (*new_cloud_transformed, abs_transform_ * delta_estimate);
+ registration_->align(*new_cloud_transformed, abs_transform_ * delta_estimate);
- bool converged = registration_->hasConverged ();
+ bool converged = registration_->hasConverged();
- if (converged)
- {
- abs_transform_ = registration_->getFinalTransformation ();
+ if (converged) {
+ abs_transform_ = registration_->getFinalTransformation();
*full_cloud_ += *new_cloud_transformed;
}
return (converged);
}
-template <typename PointT, typename Scalar> inline typename MetaRegistration<PointT, Scalar>::Matrix4
-MetaRegistration<PointT, Scalar>::getAbsoluteTransform () const
+template <typename PointT, typename Scalar>
+inline typename MetaRegistration<PointT, Scalar>::Matrix4
+MetaRegistration<PointT, Scalar>::getAbsoluteTransform() const
{
return (abs_transform_);
}
-template <typename PointT, typename Scalar> inline void
-MetaRegistration<PointT, Scalar>::reset ()
+template <typename PointT, typename Scalar>
+inline void
+MetaRegistration<PointT, Scalar>::reset()
{
- full_cloud_.reset ();
- abs_transform_ = Matrix4::Identity ();
+ full_cloud_.reset();
+ abs_transform_ = Matrix4::Identity();
}
-template <typename PointT, typename Scalar> inline void
-MetaRegistration<PointT, Scalar>::setRegistration (RegistrationPtr reg)
+template <typename PointT, typename Scalar>
+inline void
+MetaRegistration<PointT, Scalar>::setRegistration(RegistrationPtr reg)
{
registration_ = reg;
}
-template <typename PointT, typename Scalar> inline typename MetaRegistration<PointT, Scalar>::PointCloudConstPtr
-MetaRegistration<PointT, Scalar>::getMetaCloud () const
+template <typename PointT, typename Scalar>
+inline typename MetaRegistration<PointT, Scalar>::PointCloudConstPtr
+MetaRegistration<PointT, Scalar>::getMetaCloud() const
{
return full_cloud_;
}
} // namespace pcl
#endif /*PCL_REGISTRATION_IMPL_META_REGISTRATION_HPP_*/
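A minimal sketch of incremental registration with the MetaRegistration wrapper above: each new frame is aligned against the accumulated meta cloud. The header paths and the choice of IterativeClosestPoint as the inner method are assumptions; the wrapper itself only requires some pcl::Registration instance.

#include <vector>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/meta_registration.h>

void
metaRegistrationSketch(
    const std::vector<pcl::PointCloud<pcl::PointXYZ>::ConstPtr>& frames)
{
  using ICP = pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ>;
  pcl::registration::MetaRegistration<pcl::PointXYZ, float> meta;
  ICP::Ptr icp(new ICP);
  icp->setMaxCorrespondenceDistance(0.05);
  meta.setRegistration(icp);
  for (const auto& frame : frames) {
    // The identity is used here as the per-frame motion prior (delta_estimate)
    if (!meta.registerCloud(frame, Eigen::Matrix4f::Identity())) {
      break; // inner registration did not converge; stop accumulating
    }
  }
  const Eigen::Matrix4f absolute_pose = meta.getAbsoluteTransform();
  const auto meta_cloud = meta.getMetaCloud(); // concatenation of all aligned frames
  (void)absolute_pose;
  (void)meta_cloud;
}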
-
#ifndef PCL_REGISTRATION_NDT_IMPL_H_
#define PCL_REGISTRATION_NDT_IMPL_H_
-
-namespace pcl
-{
-
-template<typename PointSource, typename PointTarget>
-NormalDistributionsTransform<PointSource, PointTarget>::NormalDistributionsTransform ()
- : target_cells_ ()
- , resolution_ (1.0f)
- , step_size_ (0.1)
- , outlier_ratio_ (0.55)
- , gauss_d1_ ()
- , gauss_d2_ ()
- , trans_probability_ ()
+namespace pcl {
+
+template <typename PointSource, typename PointTarget>
+NormalDistributionsTransform<PointSource, PointTarget>::NormalDistributionsTransform()
+: target_cells_()
+, resolution_(1.0f)
+, step_size_(0.1)
+, outlier_ratio_(0.55)
+, gauss_d1_()
+, gauss_d2_()
+, trans_probability_()
{
reg_name_ = "NormalDistributionsTransform";
- double gauss_c1, gauss_c2, gauss_d3;
-
// Initializes the gaussian fitting parameters (eq. 6.8) [Magnusson 2009]
- gauss_c1 = 10.0 * (1 - outlier_ratio_);
- gauss_c2 = outlier_ratio_ / pow (resolution_, 3);
- gauss_d3 = -std::log (gauss_c2);
- gauss_d1_ = -std::log ( gauss_c1 + gauss_c2 ) - gauss_d3;
- gauss_d2_ = -2 * std::log ((-std::log ( gauss_c1 * std::exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_);
+ const double gauss_c1 = 10.0 * (1 - outlier_ratio_);
+ const double gauss_c2 = outlier_ratio_ / pow(resolution_, 3);
+ const double gauss_d3 = -std::log(gauss_c2);
+ gauss_d1_ = -std::log(gauss_c1 + gauss_c2) - gauss_d3;
+ gauss_d2_ =
+ -2 * std::log((-std::log(gauss_c1 * std::exp(-0.5) + gauss_c2) - gauss_d3) /
+ gauss_d1_);
transformation_epsilon_ = 0.1;
max_iterations_ = 35;
}
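The constants initialized above implement eq. 6.8 of [Magnusson 2009]; restated as a sketch in the member-variable notation (p_o = outlier_ratio_, r = resolution_):

  \[ c_1 = 10\,(1 - p_o), \qquad c_2 = p_o / r^{3}, \qquad d_3 = -\ln c_2, \]
  \[ d_1 = -\ln(c_1 + c_2) - d_3, \qquad d_2 = -2 \ln\!\big( (-\ln(c_1\, e^{-1/2} + c_2) - d_3) / d_1 \big), \]

so gauss_d1_ and gauss_d2_ depend only on the outlier ratio and the grid resolution.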
-
-template<typename PointSource, typename PointTarget> void
-NormalDistributionsTransform<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f &guess)
+template <typename PointSource, typename PointTarget>
+void
+NormalDistributionsTransform<PointSource, PointTarget>::computeTransformation(
+ PointCloudSource& output, const Eigen::Matrix4f& guess)
{
nr_iterations_ = 0;
converged_ = false;
- double gauss_c1, gauss_c2, gauss_d3;
-
// Initializes the gaussian fitting parameters (eq. 6.8) [Magnusson 2009]
- gauss_c1 = 10 * (1 - outlier_ratio_);
- gauss_c2 = outlier_ratio_ / pow (resolution_, 3);
- gauss_d3 = -std::log (gauss_c2);
- gauss_d1_ = -std::log ( gauss_c1 + gauss_c2 ) - gauss_d3;
- gauss_d2_ = -2 * std::log ((-std::log ( gauss_c1 * std::exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_);
-
- if (guess != Eigen::Matrix4f::Identity ())
- {
+ const double gauss_c1 = 10 * (1 - outlier_ratio_);
+ const double gauss_c2 = outlier_ratio_ / pow(resolution_, 3);
+ const double gauss_d3 = -std::log(gauss_c2);
+ gauss_d1_ = -std::log(gauss_c1 + gauss_c2) - gauss_d3;
+ gauss_d2_ =
+ -2 * std::log((-std::log(gauss_c1 * std::exp(-0.5) + gauss_c2) - gauss_d3) /
+ gauss_d1_);
+
+ if (guess != Eigen::Matrix4f::Identity()) {
// Initialise final transformation to the guessed one
final_transformation_ = guess;
// Apply guessed transformation prior to search for neighbours
- transformPointCloud (output, output, guess);
+ transformPointCloud(output, output, guess);
}
// Initialize Point Gradient and Hessian
- point_gradient_.setZero ();
- point_gradient_.block<3, 3>(0, 0).setIdentity ();
- point_hessian_.setZero ();
+ point_jacobian_.setZero();
+ point_jacobian_.block<3, 3>(0, 0).setIdentity();
+ point_hessian_.setZero();
Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation;
- eig_transformation.matrix () = final_transformation_;
+ eig_transformation.matrix() = final_transformation_;
// Convert initial guess matrix to 6 element transformation vector
- Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient;
- Eigen::Vector3f init_translation = eig_transformation.translation ();
- Eigen::Vector3f init_rotation = eig_transformation.rotation ().eulerAngles (0, 1, 2);
- p << init_translation (0), init_translation (1), init_translation (2),
- init_rotation (0), init_rotation (1), init_rotation (2);
+ Eigen::Matrix<double, 6, 1> transform, score_gradient;
+ Eigen::Vector3f init_translation = eig_transformation.translation();
+ Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2);
+ transform << init_translation.cast<double>(), init_rotation.cast<double>();
Eigen::Matrix<double, 6, 6> hessian;
- double score = 0;
-
- // Calculate derivates of initial transform vector, subsequent derivative calculations are done in the step length determination.
- score = computeDerivatives (score_gradient, hessian, output, p);
+  // Calculate derivatives of initial transform vector, subsequent derivative calculations
+ // are done in the step length determination.
+ double score = computeDerivatives(score_gradient, hessian, output, transform);
- while (!converged_)
- {
+ while (!converged_) {
// Store previous transformation
previous_transformation_ = transformation_;
- // Solve for decent direction using newton method, line 23 in Algorithm 2 [Magnusson 2009]
- Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6> > sv (hessian, Eigen::ComputeFullU | Eigen::ComputeFullV);
+    // Solve for descent direction using Newton's method, line 23 in Algorithm 2 [Magnusson
+ // 2009]
+ Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(
+ hessian, Eigen::ComputeFullU | Eigen::ComputeFullV);
// Negative for maximization as opposed to minimization
- delta_p = sv.solve (-score_gradient);
+ Eigen::Matrix<double, 6, 1> delta = sv.solve(-score_gradient);
- //Calculate step length with guarnteed sufficient decrease [More, Thuente 1994]
- double delta_p_norm = delta_p.norm ();
+    // Calculate step length with guaranteed sufficient decrease [More, Thuente 1994]
+ double delta_norm = delta.norm();
- if (delta_p_norm == 0 || std::isnan(delta_p_norm))
- {
- trans_probability_ = score / static_cast<double> (input_->size ());
- converged_ = delta_p_norm == delta_p_norm;
+ if (delta_norm == 0 || std::isnan(delta_norm)) {
+ trans_probability_ = score / static_cast<double>(input_->size());
+ converged_ = delta_norm == 0;
return;
}
- delta_p.normalize ();
- delta_p_norm = computeStepLengthMT (p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian, output);
- delta_p *= delta_p_norm;
-
-
- transformation_ = (Eigen::Translation<float, 3> (static_cast<float> (delta_p (0)), static_cast<float> (delta_p (1)), static_cast<float> (delta_p (2))) *
- Eigen::AngleAxis<float> (static_cast<float> (delta_p (3)), Eigen::Vector3f::UnitX ()) *
- Eigen::AngleAxis<float> (static_cast<float> (delta_p (4)), Eigen::Vector3f::UnitY ()) *
- Eigen::AngleAxis<float> (static_cast<float> (delta_p (5)), Eigen::Vector3f::UnitZ ())).matrix ();
+ delta /= delta_norm;
+ delta_norm = computeStepLengthMT(transform,
+ delta,
+ delta_norm,
+ step_size_,
+ transformation_epsilon_ / 2,
+ score,
+ score_gradient,
+ hessian,
+ output);
+ delta *= delta_norm;
+ // Convert delta into matrix form
+ convertTransform(delta, transformation_);
- p += delta_p;
+ transform += delta;
// Update Visualizer (untested)
if (update_visualizer_)
- update_visualizer_ (output, std::vector<int>(), *target_, std::vector<int>() );
+ update_visualizer_(output, pcl::Indices(), *target_, pcl::Indices());
- double cos_angle = 0.5 * (transformation_.coeff (0, 0) + transformation_.coeff (1, 1) + transformation_.coeff (2, 2) - 1);
- double translation_sqr = transformation_.coeff (0, 3) * transformation_.coeff (0, 3) +
- transformation_.coeff (1, 3) * transformation_.coeff (1, 3) +
- transformation_.coeff (2, 3) * transformation_.coeff (2, 3);
+ const double cos_angle =
+ 0.5 * transformation_.template block<3, 3>(0, 0).trace() - 1;
+ const double translation_sqr =
+ transformation_.template block<3, 1>(0, 3).squaredNorm();
nr_iterations_++;
if (nr_iterations_ >= max_iterations_ ||
- ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) && (transformation_rotation_epsilon_ > 0 && cos_angle >= transformation_rotation_epsilon_)) ||
- ((transformation_epsilon_ <= 0) && (transformation_rotation_epsilon_ > 0 && cos_angle >= transformation_rotation_epsilon_)) ||
- ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) && (transformation_rotation_epsilon_ <= 0)))
- {
+ ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) &&
+ (transformation_rotation_epsilon_ > 0 &&
+ cos_angle >= transformation_rotation_epsilon_)) ||
+ ((transformation_epsilon_ <= 0) &&
+ (transformation_rotation_epsilon_ > 0 &&
+ cos_angle >= transformation_rotation_epsilon_)) ||
+ ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) &&
+ (transformation_rotation_epsilon_ <= 0))) {
converged_ = true;
}
}
- // Store transformation probability. The realtive differences within each scan registration are accurate
- // but the normalization constants need to be modified for it to be globally accurate
- trans_probability_ = score / static_cast<double> (input_->size ());
+  // Store transformation probability. The relative differences within each scan
+ // registration are accurate but the normalization constants need to be modified for
+ // it to be globally accurate
+ trans_probability_ = score / static_cast<double>(input_->size());
}
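A minimal usage sketch of the NDT registration implemented above. Parameter values are illustrative only, and the header path pcl/registration/ndt.h is assumed.

#include <pcl/point_types.h>
#include <pcl/registration/ndt.h>

void
ndtSketch(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
          const pcl::PointCloud<pcl::PointXYZ>::Ptr& target)
{
  pcl::NormalDistributionsTransform<pcl::PointXYZ, pcl::PointXYZ> ndt;
  ndt.setResolution(1.0f);            // voxel size of the target NDT grid
  ndt.setStepSize(0.1);               // maximum More-Thuente line search step
  ndt.setTransformationEpsilon(0.01); // translation convergence threshold
  ndt.setMaximumIterations(35);
  ndt.setInputSource(source);
  ndt.setInputTarget(target);
  pcl::PointCloud<pcl::PointXYZ> aligned;
  const Eigen::Matrix4f guess = Eigen::Matrix4f::Identity(); // initial pose estimate
  ndt.align(aligned, guess);
  if (ndt.hasConverged()) {
    const Eigen::Matrix4f transform = ndt.getFinalTransformation();
    (void)transform;
  }
}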
-
-template<typename PointSource, typename PointTarget> double
-NormalDistributionsTransform<PointSource, PointTarget>::computeDerivatives (Eigen::Matrix<double, 6, 1> &score_gradient,
- Eigen::Matrix<double, 6, 6> &hessian,
- PointCloudSource &trans_cloud,
- Eigen::Matrix<double, 6, 1> &p,
- bool compute_hessian)
+template <typename PointSource, typename PointTarget>
+double
+NormalDistributionsTransform<PointSource, PointTarget>::computeDerivatives(
+ Eigen::Matrix<double, 6, 1>& score_gradient,
+ Eigen::Matrix<double, 6, 6>& hessian,
+ const PointCloudSource& trans_cloud,
+ const Eigen::Matrix<double, 6, 1>& transform,
+ bool compute_hessian)
{
- // Original Point and Transformed Point
- PointSource x_pt, x_trans_pt;
- // Original Point and Transformed Point (for math)
- Eigen::Vector3d x, x_trans;
- // Occupied Voxel
- TargetGridLeafConstPtr cell;
- // Inverse Covariance of Occupied Voxel
- Eigen::Matrix3d c_inv;
-
- score_gradient.setZero ();
- hessian.setZero ();
+ score_gradient.setZero();
+ hessian.setZero();
double score = 0;
// Precompute Angular Derivatives (eq. 6.19 and 6.21)[Magnusson 2009]
- computeAngleDerivatives (p);
+ computeAngleDerivatives(transform);
// Update gradient and hessian for each point, line 17 in Algorithm 2 [Magnusson 2009]
- for (std::size_t idx = 0; idx < input_->size (); idx++)
- {
- x_trans_pt = trans_cloud[idx];
+ for (std::size_t idx = 0; idx < input_->size(); idx++) {
+ // Transformed Point
+ const auto& x_trans_pt = trans_cloud[idx];
- // Find nieghbors (Radius search has been experimentally faster than direct neighbor checking.
+ // Find neighbors (Radius search has been experimentally faster than direct neighbor
+    // checking).
std::vector<TargetGridLeafConstPtr> neighborhood;
std::vector<float> distances;
- target_cells_.radiusSearch (x_trans_pt, resolution_, neighborhood, distances);
-
- for (typename std::vector<TargetGridLeafConstPtr>::iterator neighborhood_it = neighborhood.begin (); neighborhood_it != neighborhood.end (); ++neighborhood_it)
- {
- cell = *neighborhood_it;
- x_pt = (*input_)[idx];
- x = Eigen::Vector3d (x_pt.x, x_pt.y, x_pt.z);
+ target_cells_.radiusSearch(x_trans_pt, resolution_, neighborhood, distances);
- x_trans = Eigen::Vector3d (x_trans_pt.x, x_trans_pt.y, x_trans_pt.z);
+ for (const auto& cell : neighborhood) {
+ // Original Point
+ const auto& x_pt = (*input_)[idx];
+ const Eigen::Vector3d x = x_pt.getVector3fMap().template cast<double>();
// Denorm point, x_k' in Equations 6.12 and 6.13 [Magnusson 2009]
- x_trans -= cell->getMean ();
+ const Eigen::Vector3d x_trans =
+ x_trans_pt.getVector3fMap().template cast<double>() - cell->getMean();
+ // Inverse Covariance of Occupied Voxel
// Uses precomputed covariance for speed.
- c_inv = cell->getInverseCov ();
-
- // Compute derivative of transform function w.r.t. transform vector, J_E and H_E in Equations 6.18 and 6.20 [Magnusson 2009]
- computePointDerivatives (x);
- // Update score, gradient and hessian, lines 19-21 in Algorithm 2, according to Equations 6.10, 6.12 and 6.13, respectively [Magnusson 2009]
- score += updateDerivatives (score_gradient, hessian, x_trans, c_inv, compute_hessian);
-
+ const Eigen::Matrix3d c_inv = cell->getInverseCov();
+
+ // Compute derivative of transform function w.r.t. transform vector, J_E and H_E
+ // in Equations 6.18 and 6.20 [Magnusson 2009]
+ computePointDerivatives(x);
+ // Update score, gradient and hessian, lines 19-21 in Algorithm 2, according to
+ // Equations 6.10, 6.12 and 6.13, respectively [Magnusson 2009]
+ score +=
+ updateDerivatives(score_gradient, hessian, x_trans, c_inv, compute_hessian);
}
}
- return (score);
+ return score;
}
-
-template<typename PointSource, typename PointTarget> void
-NormalDistributionsTransform<PointSource, PointTarget>::computeAngleDerivatives (Eigen::Matrix<double, 6, 1> &p, bool compute_hessian)
+template <typename PointSource, typename PointTarget>
+void
+NormalDistributionsTransform<PointSource, PointTarget>::computeAngleDerivatives(
+ const Eigen::Matrix<double, 6, 1>& transform, bool compute_hessian)
{
// Simplified math for near 0 angles
- double cx, cy, cz, sx, sy, sz;
- if (std::abs (p (3)) < 10e-5)
- {
- //p(3) = 0;
- cx = 1.0;
- sx = 0.0;
- }
- else
- {
- cx = std::cos (p (3));
- sx = sin (p (3));
- }
- if (std::abs (p (4)) < 10e-5)
- {
- //p(4) = 0;
- cy = 1.0;
- sy = 0.0;
- }
- else
- {
- cy = std::cos (p (4));
- sy = sin (p (4));
- }
-
- if (std::abs (p (5)) < 10e-5)
- {
- //p(5) = 0;
- cz = 1.0;
- sz = 0.0;
- }
- else
- {
- cz = std::cos (p (5));
- sz = sin (p (5));
- }
+ const auto calculate_cos_sin = [](double angle, double& c, double& s) {
+ if (std::abs(angle) < 10e-5) {
+ c = 1.0;
+ s = 0.0;
+ }
+ else {
+ c = std::cos(angle);
+ s = std::sin(angle);
+ }
+ };
- // Precomputed angular gradiant components. Letters correspond to Equation 6.19 [Magnusson 2009]
- j_ang_a_ << (-sx * sz + cx * sy * cz), (-sx * cz - cx * sy * sz), (-cx * cy);
- j_ang_b_ << (cx * sz + sx * sy * cz), (cx * cz - sx * sy * sz), (-sx * cy);
- j_ang_c_ << (-sy * cz), sy * sz, cy;
- j_ang_d_ << sx * cy * cz, (-sx * cy * sz), sx * sy;
- j_ang_e_ << (-cx * cy * cz), cx * cy * sz, (-cx * sy);
- j_ang_f_ << (-cy * sz), (-cy * cz), 0;
- j_ang_g_ << (cx * cz - sx * sy * sz), (-cx * sz - sx * sy * cz), 0;
- j_ang_h_ << (sx * cz + cx * sy * sz), (cx * sy * cz - sx * sz), 0;
-
- if (compute_hessian)
- {
- // Precomputed angular hessian components. Letters correspond to Equation 6.21 and numbers correspond to row index [Magnusson 2009]
- h_ang_a2_ << (-cx * sz - sx * sy * cz), (-cx * cz + sx * sy * sz), sx * cy;
- h_ang_a3_ << (-sx * sz + cx * sy * cz), (-cx * sy * sz - sx * cz), (-cx * cy);
-
- h_ang_b2_ << (cx * cy * cz), (-cx * cy * sz), (cx * sy);
- h_ang_b3_ << (sx * cy * cz), (-sx * cy * sz), (sx * sy);
-
- h_ang_c2_ << (-sx * cz - cx * sy * sz), (sx * sz - cx * sy * cz), 0;
- h_ang_c3_ << (cx * cz - sx * sy * sz), (-sx * sy * cz - cx * sz), 0;
-
- h_ang_d1_ << (-cy * cz), (cy * sz), (sy);
- h_ang_d2_ << (-sx * sy * cz), (sx * sy * sz), (sx * cy);
- h_ang_d3_ << (cx * sy * cz), (-cx * sy * sz), (-cx * cy);
-
- h_ang_e1_ << (sy * sz), (sy * cz), 0;
- h_ang_e2_ << (-sx * cy * sz), (-sx * cy * cz), 0;
- h_ang_e3_ << (cx * cy * sz), (cx * cy * cz), 0;
-
- h_ang_f1_ << (-cy * cz), (cy * sz), 0;
- h_ang_f2_ << (-cx * sz - sx * sy * cz), (-cx * cz + sx * sy * sz), 0;
- h_ang_f3_ << (-sx * sz + cx * sy * cz), (-cx * sy * sz - sx * cz), 0;
+ double cx, cy, cz, sx, sy, sz;
+ calculate_cos_sin(transform(3), cx, sx);
+ calculate_cos_sin(transform(4), cy, sy);
+ calculate_cos_sin(transform(5), cz, sz);
+
+ // Precomputed angular gradient components. Letters correspond to Equation 6.19
+ // [Magnusson 2009]
+ angular_jacobian_.setZero();
+ angular_jacobian_.row(0).noalias() = Eigen::Vector4d(
+ (-sx * sz + cx * sy * cz), (-sx * cz - cx * sy * sz), (-cx * cy), 1.0); // a
+ angular_jacobian_.row(1).noalias() = Eigen::Vector4d(
+ (cx * sz + sx * sy * cz), (cx * cz - sx * sy * sz), (-sx * cy), 1.0); // b
+ angular_jacobian_.row(2).noalias() =
+ Eigen::Vector4d((-sy * cz), sy * sz, cy, 1.0); // c
+ angular_jacobian_.row(3).noalias() =
+ Eigen::Vector4d(sx * cy * cz, (-sx * cy * sz), sx * sy, 1.0); // d
+ angular_jacobian_.row(4).noalias() =
+ Eigen::Vector4d((-cx * cy * cz), cx * cy * sz, (-cx * sy), 1.0); // e
+ angular_jacobian_.row(5).noalias() =
+ Eigen::Vector4d((-cy * sz), (-cy * cz), 0, 1.0); // f
+ angular_jacobian_.row(6).noalias() =
+ Eigen::Vector4d((cx * cz - sx * sy * sz), (-cx * sz - sx * sy * cz), 0, 1.0); // g
+ angular_jacobian_.row(7).noalias() =
+ Eigen::Vector4d((sx * cz + cx * sy * sz), (cx * sy * cz - sx * sz), 0, 1.0); // h
+
+ if (compute_hessian) {
+ // Precomputed angular hessian components. Letters correspond to Equation 6.21 and
+ // numbers correspond to row index [Magnusson 2009]
+ angular_hessian_.setZero();
+ angular_hessian_.row(0).noalias() = Eigen::Vector4d(
+ (-cx * sz - sx * sy * cz), (-cx * cz + sx * sy * sz), sx * cy, 0.0f); // a2
+ angular_hessian_.row(1).noalias() = Eigen::Vector4d(
+ (-sx * sz + cx * sy * cz), (-cx * sy * sz - sx * cz), (-cx * cy), 0.0f); // a3
+
+ angular_hessian_.row(2).noalias() =
+ Eigen::Vector4d((cx * cy * cz), (-cx * cy * sz), (cx * sy), 0.0f); // b2
+ angular_hessian_.row(3).noalias() =
+ Eigen::Vector4d((sx * cy * cz), (-sx * cy * sz), (sx * sy), 0.0f); // b3
+
+ angular_hessian_.row(4).noalias() = Eigen::Vector4d(
+ (-sx * cz - cx * sy * sz), (sx * sz - cx * sy * cz), 0, 0.0f); // c2
+ angular_hessian_.row(5).noalias() = Eigen::Vector4d(
+ (cx * cz - sx * sy * sz), (-sx * sy * cz - cx * sz), 0, 0.0f); // c3
+
+ angular_hessian_.row(6).noalias() =
+ Eigen::Vector4d((-cy * cz), (cy * sz), (sy), 0.0f); // d1
+ angular_hessian_.row(7).noalias() =
+ Eigen::Vector4d((-sx * sy * cz), (sx * sy * sz), (sx * cy), 0.0f); // d2
+ angular_hessian_.row(8).noalias() =
+ Eigen::Vector4d((cx * sy * cz), (-cx * sy * sz), (-cx * cy), 0.0f); // d3
+
+ angular_hessian_.row(9).noalias() =
+ Eigen::Vector4d((sy * sz), (sy * cz), 0, 0.0f); // e1
+ angular_hessian_.row(10).noalias() =
+ Eigen::Vector4d((-sx * cy * sz), (-sx * cy * cz), 0, 0.0f); // e2
+ angular_hessian_.row(11).noalias() =
+ Eigen::Vector4d((cx * cy * sz), (cx * cy * cz), 0, 0.0f); // e3
+
+ angular_hessian_.row(12).noalias() =
+ Eigen::Vector4d((-cy * cz), (cy * sz), 0, 0.0f); // f1
+ angular_hessian_.row(13).noalias() = Eigen::Vector4d(
+ (-cx * sz - sx * sy * cz), (-cx * cz + sx * sy * sz), 0, 0.0f); // f2
+ angular_hessian_.row(14).noalias() = Eigen::Vector4d(
+ (-sx * sz + cx * sy * cz), (-cx * sy * sz - sx * cz), 0, 0.0f); // f3
}
}
-
-template<typename PointSource, typename PointTarget> void
-NormalDistributionsTransform<PointSource, PointTarget>::computePointDerivatives (Eigen::Vector3d &x, bool compute_hessian)
+template <typename PointSource, typename PointTarget>
+void
+NormalDistributionsTransform<PointSource, PointTarget>::computePointDerivatives(
+ const Eigen::Vector3d& x, bool compute_hessian)
{
- // Calculate first derivative of Transformation Equation 6.17 w.r.t. transform vector p.
- // Derivative w.r.t. ith element of transform vector corresponds to column i, Equation 6.18 and 6.19 [Magnusson 2009]
- point_gradient_ (1, 3) = x.dot (j_ang_a_);
- point_gradient_ (2, 3) = x.dot (j_ang_b_);
- point_gradient_ (0, 4) = x.dot (j_ang_c_);
- point_gradient_ (1, 4) = x.dot (j_ang_d_);
- point_gradient_ (2, 4) = x.dot (j_ang_e_);
- point_gradient_ (0, 5) = x.dot (j_ang_f_);
- point_gradient_ (1, 5) = x.dot (j_ang_g_);
- point_gradient_ (2, 5) = x.dot (j_ang_h_);
-
- if (compute_hessian)
- {
- // Vectors from Equation 6.21 [Magnusson 2009]
- Eigen::Vector3d a, b, c, d, e, f;
-
- a << 0, x.dot (h_ang_a2_), x.dot (h_ang_a3_);
- b << 0, x.dot (h_ang_b2_), x.dot (h_ang_b3_);
- c << 0, x.dot (h_ang_c2_), x.dot (h_ang_c3_);
- d << x.dot (h_ang_d1_), x.dot (h_ang_d2_), x.dot (h_ang_d3_);
- e << x.dot (h_ang_e1_), x.dot (h_ang_e2_), x.dot (h_ang_e3_);
- f << x.dot (h_ang_f1_), x.dot (h_ang_f2_), x.dot (h_ang_f3_);
+ // Calculate first derivative of Transformation Equation 6.17 w.r.t. transform vector.
+ // Derivative w.r.t. ith element of transform vector corresponds to column i,
+ // Equation 6.18 and 6.19 [Magnusson 2009]
+ Eigen::Matrix<double, 8, 1> point_angular_jacobian =
+ angular_jacobian_ * Eigen::Vector4d(x[0], x[1], x[2], 0.0);
+ point_jacobian_(1, 3) = point_angular_jacobian[0];
+ point_jacobian_(2, 3) = point_angular_jacobian[1];
+ point_jacobian_(0, 4) = point_angular_jacobian[2];
+ point_jacobian_(1, 4) = point_angular_jacobian[3];
+ point_jacobian_(2, 4) = point_angular_jacobian[4];
+ point_jacobian_(0, 5) = point_angular_jacobian[5];
+ point_jacobian_(1, 5) = point_angular_jacobian[6];
+ point_jacobian_(2, 5) = point_angular_jacobian[7];
+
+ if (compute_hessian) {
+ Eigen::Matrix<double, 15, 1> point_angular_hessian =
+ angular_hessian_ * Eigen::Vector4d(x[0], x[1], x[2], 0.0);
- // Calculate second derivative of Transformation Equation 6.17 w.r.t. transform vector p.
- // Derivative w.r.t. ith and jth elements of transform vector corresponds to the 3x1 block matrix starting at (3i,j), Equation 6.20 and 6.21 [Magnusson 2009]
+ // Vectors from Equation 6.21 [Magnusson 2009]
+ const Eigen::Vector3d a(0, point_angular_hessian[0], point_angular_hessian[1]);
+ const Eigen::Vector3d b(0, point_angular_hessian[2], point_angular_hessian[3]);
+ const Eigen::Vector3d c(0, point_angular_hessian[4], point_angular_hessian[5]);
+ const Eigen::Vector3d d = point_angular_hessian.block<3, 1>(6, 0);
+ const Eigen::Vector3d e = point_angular_hessian.block<3, 1>(9, 0);
+ const Eigen::Vector3d f = point_angular_hessian.block<3, 1>(12, 0);
+
+ // Calculate second derivative of Transformation Equation 6.17 w.r.t. transform
+ // vector. Derivative w.r.t. ith and jth elements of transform vector corresponds to
+ // the 3x1 block matrix starting at (3i,j), Equation 6.20 and 6.21 [Magnusson 2009]
point_hessian_.block<3, 1>(9, 3) = a;
point_hessian_.block<3, 1>(12, 3) = b;
point_hessian_.block<3, 1>(15, 3) = c;
}
}
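
The batched form above replaces the eight separate j_ang_a_ .. j_ang_h_ dot products of the old implementation with a single 8x4 matrix-vector product. A minimal, hypothetical sketch (an illustration, not part of the patch) of why the two are equivalent; the trailing zero in the homogeneous vector simply ignores the constant fourth column of angular_jacobian_:

#include <Eigen/Dense>
#include <cassert>
#include <cmath>

inline void
jacobian_batching_sketch()
{
  // Stand-in for angular_jacobian_; any values work for the equivalence check.
  const Eigen::Matrix<double, 8, 4> J = Eigen::Matrix<double, 8, 4>::Random();
  const Eigen::Vector3d x(1.0, -2.0, 0.5); // hypothetical source point
  const Eigen::Matrix<double, 8, 1> batched =
      J * Eigen::Vector4d(x[0], x[1], x[2], 0.0);
  for (int r = 0; r < 8; ++r) {
    // Each entry equals the dot product the old per-member code computed,
    // e.g. batched[0] corresponds to x.dot(j_ang_a_).
    assert(std::abs(batched[r] - J.row(r).head<3>().dot(x)) < 1e-12);
  }
}
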
-
-template<typename PointSource, typename PointTarget> double
-NormalDistributionsTransform<PointSource, PointTarget>::updateDerivatives (Eigen::Matrix<double, 6, 1> &score_gradient,
- Eigen::Matrix<double, 6, 6> &hessian,
- Eigen::Vector3d &x_trans, Eigen::Matrix3d &c_inv,
- bool compute_hessian)
+template <typename PointSource, typename PointTarget>
+double
+NormalDistributionsTransform<PointSource, PointTarget>::updateDerivatives(
+ Eigen::Matrix<double, 6, 1>& score_gradient,
+ Eigen::Matrix<double, 6, 6>& hessian,
+ const Eigen::Vector3d& x_trans,
+ const Eigen::Matrix3d& c_inv,
+ bool compute_hessian) const
{
- Eigen::Vector3d cov_dxd_pi;
// e^(-d_2/2 * (x_k - mu_k)^T Sigma_k^-1 (x_k - mu_k)) Equation 6.9 [Magnusson 2009]
- double e_x_cov_x = std::exp (-gauss_d2_ * x_trans.dot (c_inv * x_trans) / 2);
- // Calculate probability of transformed points existence, Equation 6.9 [Magnusson 2009]
- double score_inc = -gauss_d1_ * e_x_cov_x;
+ double e_x_cov_x = std::exp(-gauss_d2_ * x_trans.dot(c_inv * x_trans) / 2);
+  // Calculate probability of the transformed point's existence, Equation 6.9
+  // [Magnusson 2009]
+ const double score_inc = -gauss_d1_ * e_x_cov_x;
e_x_cov_x = gauss_d2_ * e_x_cov_x;
// Error checking for invalid values.
- if (e_x_cov_x > 1 || e_x_cov_x < 0 || std::isnan(e_x_cov_x))
- return (0);
+ if (e_x_cov_x > 1 || e_x_cov_x < 0 || std::isnan(e_x_cov_x)) {
+ return 0;
+ }
// Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]
e_x_cov_x *= gauss_d1_;
-
- for (int i = 0; i < 6; i++)
- {
- // Sigma_k^-1 d(T(x,p))/dpi, Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]
- cov_dxd_pi = c_inv * point_gradient_.col (i);
+ for (int i = 0; i < 6; i++) {
+ // Sigma_k^-1 d(T(x,p))/dpi, Reusable portion of Equation 6.12 and 6.13 [Magnusson
+ // 2009]
+ const Eigen::Vector3d cov_dxd_pi = c_inv * point_jacobian_.col(i);
// Update gradient, Equation 6.12 [Magnusson 2009]
- score_gradient (i) += x_trans.dot (cov_dxd_pi) * e_x_cov_x;
+ score_gradient(i) += x_trans.dot(cov_dxd_pi) * e_x_cov_x;
- if (compute_hessian)
- {
- for (Eigen::Index j = 0; j < hessian.cols (); j++)
- {
+ if (compute_hessian) {
+ for (Eigen::Index j = 0; j < hessian.cols(); j++) {
// Update hessian, Equation 6.13 [Magnusson 2009]
- hessian (i, j) += e_x_cov_x * (-gauss_d2_ * x_trans.dot (cov_dxd_pi) * x_trans.dot (c_inv * point_gradient_.col (j)) +
- x_trans.dot (c_inv * point_hessian_.block<3, 1>(3 * i, j)) +
- point_gradient_.col (j).dot (cov_dxd_pi) );
+ hessian(i, j) +=
+ e_x_cov_x * (-gauss_d2_ * x_trans.dot(cov_dxd_pi) *
+ x_trans.dot(c_inv * point_jacobian_.col(j)) +
+ x_trans.dot(c_inv * point_hessian_.block<3, 1>(3 * i, j)) +
+ point_jacobian_.col(j).dot(cov_dxd_pi));
}
}
}
- return (score_inc);
+ return score_inc;
}
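
For reference, this is my transcription (an addition, not text from the patch) of what the loop above accumulates, i.e. Equations 6.12 and 6.13 [Magnusson 2009] as the code evaluates them, writing $\tilde{x}$ for x_trans, $\Sigma^{-1}$ for c_inv, $J_i$ for column $i$ of point_jacobian_, and $H^{E}_{ij}$ for point_hessian_.block<3,1>(3i, j):

\[
\Delta g_i = d_1 d_2\, e^{-\frac{d_2}{2}\,\tilde{x}^{\top}\Sigma^{-1}\tilde{x}}\;
\tilde{x}^{\top}\Sigma^{-1} J_i ,
\]
\[
\Delta H_{ij} = d_1 d_2\, e^{-\frac{d_2}{2}\,\tilde{x}^{\top}\Sigma^{-1}\tilde{x}}
\left[ -d_2 \left(\tilde{x}^{\top}\Sigma^{-1} J_i\right)\left(\tilde{x}^{\top}\Sigma^{-1} J_j\right)
+ \tilde{x}^{\top}\Sigma^{-1} H^{E}_{ij} + J_j^{\top}\Sigma^{-1} J_i \right],
\]

while the per-point score contribution returned as score_inc is $-d_1\, e^{-\frac{d_2}{2}\tilde{x}^{\top}\Sigma^{-1}\tilde{x}}$.
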
-
-template<typename PointSource, typename PointTarget> void
-NormalDistributionsTransform<PointSource, PointTarget>::computeHessian (Eigen::Matrix<double, 6, 6> &hessian,
- PointCloudSource &trans_cloud, Eigen::Matrix<double, 6, 1> &)
+template <typename PointSource, typename PointTarget>
+void
+NormalDistributionsTransform<PointSource, PointTarget>::computeHessian(
+ Eigen::Matrix<double, 6, 6>& hessian, const PointCloudSource& trans_cloud)
{
- // Original Point and Transformed Point
- PointSource x_pt, x_trans_pt;
- // Original Point and Transformed Point (for math)
- Eigen::Vector3d x, x_trans;
- // Occupied Voxel
- TargetGridLeafConstPtr cell;
- // Inverse Covariance of Occupied Voxel
- Eigen::Matrix3d c_inv;
+ hessian.setZero();
- hessian.setZero ();
+  // Precomputing the angular derivatives is unnecessary here because they are only
+  // used after the regular derivative calculation.
+  // Update hessian for each point, line 17 in Algorithm 2 [Magnusson 2009]
+ for (std::size_t idx = 0; idx < input_->size(); idx++) {
+ // Transformed Point
+ const auto& x_trans_pt = trans_cloud[idx];
- // Precompute Angular Derivatives unessisary because only used after regular derivative calculation
-
- // Update hessian for each point, line 17 in Algorithm 2 [Magnusson 2009]
- for (std::size_t idx = 0; idx < input_->size (); idx++)
- {
- x_trans_pt = trans_cloud[idx];
-
- // Find nieghbors (Radius search has been experimentally faster than direct neighbor checking.
+    // Find neighbors (radius search has been experimentally faster than direct
+    // neighbor checking).
std::vector<TargetGridLeafConstPtr> neighborhood;
std::vector<float> distances;
- target_cells_.radiusSearch (x_trans_pt, resolution_, neighborhood, distances);
-
- for (typename std::vector<TargetGridLeafConstPtr>::iterator neighborhood_it = neighborhood.begin (); neighborhood_it != neighborhood.end (); ++neighborhood_it)
- {
- cell = *neighborhood_it;
-
- {
- x_pt = (*input_)[idx];
- x = Eigen::Vector3d (x_pt.x, x_pt.y, x_pt.z);
+ target_cells_.radiusSearch(x_trans_pt, resolution_, neighborhood, distances);
- x_trans = Eigen::Vector3d (x_trans_pt.x, x_trans_pt.y, x_trans_pt.z);
+ for (const auto& cell : neighborhood) {
+ // Original Point
+ const auto& x_pt = (*input_)[idx];
+ const Eigen::Vector3d x = x_pt.getVector3fMap().template cast<double>();
- // Denorm point, x_k' in Equations 6.12 and 6.13 [Magnusson 2009]
- x_trans -= cell->getMean ();
- // Uses precomputed covariance for speed.
- c_inv = cell->getInverseCov ();
-
- // Compute derivative of transform function w.r.t. transform vector, J_E and H_E in Equations 6.18 and 6.20 [Magnusson 2009]
- computePointDerivatives (x);
- // Update hessian, lines 21 in Algorithm 2, according to Equations 6.10, 6.12 and 6.13, respectively [Magnusson 2009]
- updateHessian (hessian, x_trans, c_inv);
- }
+ // Denorm point, x_k' in Equations 6.12 and 6.13 [Magnusson 2009]
+ const Eigen::Vector3d x_trans =
+ x_trans_pt.getVector3fMap().template cast<double>() - cell->getMean();
+ // Inverse Covariance of Occupied Voxel
+ // Uses precomputed covariance for speed.
+ const Eigen::Matrix3d c_inv = cell->getInverseCov();
+
+ // Compute derivative of transform function w.r.t. transform vector, J_E and H_E
+ // in Equations 6.18 and 6.20 [Magnusson 2009]
+ computePointDerivatives(x);
+ // Update hessian, lines 21 in Algorithm 2, according to Equations 6.10, 6.12
+ // and 6.13, respectively [Magnusson 2009]
+ updateHessian(hessian, x_trans, c_inv);
}
}
}
-
-template<typename PointSource, typename PointTarget> void
-NormalDistributionsTransform<PointSource, PointTarget>::updateHessian (Eigen::Matrix<double, 6, 6> &hessian, Eigen::Vector3d &x_trans, Eigen::Matrix3d &c_inv)
+template <typename PointSource, typename PointTarget>
+void
+NormalDistributionsTransform<PointSource, PointTarget>::updateHessian(
+ Eigen::Matrix<double, 6, 6>& hessian,
+ const Eigen::Vector3d& x_trans,
+ const Eigen::Matrix3d& c_inv) const
{
- Eigen::Vector3d cov_dxd_pi;
// e^(-d_2/2 * (x_k - mu_k)^T Sigma_k^-1 (x_k - mu_k)) Equation 6.9 [Magnusson 2009]
- double e_x_cov_x = gauss_d2_ * std::exp (-gauss_d2_ * x_trans.dot (c_inv * x_trans) / 2);
+ double e_x_cov_x =
+ gauss_d2_ * std::exp(-gauss_d2_ * x_trans.dot(c_inv * x_trans) / 2);
// Error checking for invalid values.
- if (e_x_cov_x > 1 || e_x_cov_x < 0 || std::isnan(e_x_cov_x))
+ if (e_x_cov_x > 1 || e_x_cov_x < 0 || std::isnan(e_x_cov_x)) {
return;
+ }
// Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]
e_x_cov_x *= gauss_d1_;
- for (int i = 0; i < 6; i++)
- {
- // Sigma_k^-1 d(T(x,p))/dpi, Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]
- cov_dxd_pi = c_inv * point_gradient_.col (i);
+ for (int i = 0; i < 6; i++) {
+ // Sigma_k^-1 d(T(x,p))/dpi, Reusable portion of Equation 6.12 and 6.13 [Magnusson
+ // 2009]
+ const Eigen::Vector3d cov_dxd_pi = c_inv * point_jacobian_.col(i);
- for (Eigen::Index j = 0; j < hessian.cols (); j++)
- {
+ for (Eigen::Index j = 0; j < hessian.cols(); j++) {
// Update hessian, Equation 6.13 [Magnusson 2009]
- hessian (i, j) += e_x_cov_x * (-gauss_d2_ * x_trans.dot (cov_dxd_pi) * x_trans.dot (c_inv * point_gradient_.col (j)) +
- x_trans.dot (c_inv * point_hessian_.block<3, 1>(3 * i, j)) +
- point_gradient_.col (j).dot (cov_dxd_pi) );
+ hessian(i, j) +=
+ e_x_cov_x * (-gauss_d2_ * x_trans.dot(cov_dxd_pi) *
+ x_trans.dot(c_inv * point_jacobian_.col(j)) +
+ x_trans.dot(c_inv * point_hessian_.block<3, 1>(3 * i, j)) +
+ point_jacobian_.col(j).dot(cov_dxd_pi));
}
}
-
}
-
-template<typename PointSource, typename PointTarget> bool
-NormalDistributionsTransform<PointSource, PointTarget>::updateIntervalMT (double &a_l, double &f_l, double &g_l,
- double &a_u, double &f_u, double &g_u,
- double a_t, double f_t, double g_t)
+template <typename PointSource, typename PointTarget>
+bool
+NormalDistributionsTransform<PointSource, PointTarget>::updateIntervalMT(
+ double& a_l,
+ double& f_l,
+ double& g_l,
+ double& a_u,
+ double& f_u,
+ double& g_u,
+ double a_t,
+ double f_t,
+ double g_t) const
{
- // Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994]
- if (f_t > f_l)
- {
+ // Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente
+ // 1994]
+ if (f_t > f_l) {
a_u = a_t;
f_u = f_t;
g_u = g_t;
- return (false);
+ return false;
}
- // Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994]
- if (g_t * (a_l - a_t) > 0)
- {
+ // Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente
+ // 1994]
+ if (g_t * (a_l - a_t) > 0) {
a_l = a_t;
f_l = f_t;
g_l = g_t;
- return (false);
+ return false;
}
- // Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994]
- if (g_t * (a_l - a_t) < 0)
- {
+ // Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente
+ // 1994]
+ if (g_t * (a_l - a_t) < 0) {
a_u = a_l;
f_u = f_l;
g_u = g_l;
a_l = a_t;
f_l = f_t;
g_l = g_t;
- return (false);
+ return false;
}
// Interval Converged
- return (true);
+ return true;
}
-
-template<typename PointSource, typename PointTarget> double
-NormalDistributionsTransform<PointSource, PointTarget>::trialValueSelectionMT (double a_l, double f_l, double g_l,
- double a_u, double f_u, double g_u,
- double a_t, double f_t, double g_t)
+template <typename PointSource, typename PointTarget>
+double
+NormalDistributionsTransform<PointSource, PointTarget>::trialValueSelectionMT(
+ double a_l,
+ double f_l,
+ double g_l,
+ double a_u,
+ double f_u,
+ double g_u,
+ double a_t,
+ double f_t,
+ double g_t) const
{
- // Case 1 in Trial Value Selection [More, Thuente 1994]
- if (f_t > f_l)
- {
+ if (a_t == a_l && a_t == a_u) {
+ return a_t;
+ }
+
+ // Endpoints condition check [More, Thuente 1994], p.299 - 300
+ enum class EndpointsCondition { Case1, Case2, Case3, Case4 };
+ EndpointsCondition condition;
+
+ if (a_t == a_l) {
+ condition = EndpointsCondition::Case4;
+ }
+ else if (f_t > f_l) {
+ condition = EndpointsCondition::Case1;
+ }
+ else if (g_t * g_l < 0) {
+ condition = EndpointsCondition::Case2;
+ }
+ else if (std::fabs(g_t) <= std::fabs(g_l)) {
+ condition = EndpointsCondition::Case3;
+ }
+ else {
+ condition = EndpointsCondition::Case4;
+ }
+
+ switch (condition) {
+ case EndpointsCondition::Case1: {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
- double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
- double w = std::sqrt (z * z - g_t * g_l);
+ const double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
+ const double w = std::sqrt(z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
- double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
+ const double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l
// Equation 2.4.2 [Sun, Yuan 2006]
- double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t));
+ const double a_q =
+ a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t));
- if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l))
- return (a_c);
- return (0.5 * (a_q + a_c));
+ if (std::fabs(a_c - a_l) < std::fabs(a_q - a_l)) {
+ return a_c;
+ }
+ return 0.5 * (a_q + a_c);
}
- // Case 2 in Trial Value Selection [More, Thuente 1994]
- if (g_t * g_l < 0)
- {
+
+ case EndpointsCondition::Case2: {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
- double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
- double w = std::sqrt (z * z - g_t * g_l);
+ const double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
+ const double w = std::sqrt(z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
- double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
+ const double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
- double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
+ const double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
- if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t))
- return (a_c);
- return (a_s);
+ if (std::fabs(a_c - a_t) >= std::fabs(a_s - a_t)) {
+ return a_c;
+ }
+ return a_s;
}
- // Case 3 in Trial Value Selection [More, Thuente 1994]
- if (std::fabs (g_t) <= std::fabs (g_l))
- {
+
+ case EndpointsCondition::Case3: {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
- double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
- double w = std::sqrt (z * z - g_t * g_l);
- double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
+ const double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
+ const double w = std::sqrt(z * z - g_t * g_l);
+ const double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
- double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
+ const double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
double a_t_next;
- if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t))
+ if (std::fabs(a_c - a_t) < std::fabs(a_s - a_t)) {
a_t_next = a_c;
- else
+ }
+ else {
a_t_next = a_s;
+ }
- if (a_t > a_l)
- return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next));
- return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next));
+ if (a_t > a_l) {
+ return std::min(a_t + 0.66 * (a_u - a_t), a_t_next);
+ }
+ return std::max(a_t + 0.66 * (a_u - a_t), a_t_next);
}
- // Case 4 in Trial Value Selection [More, Thuente 1994]
- // Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t
- // Equation 2.4.52 [Sun, Yuan 2006]
- double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u;
- double w = std::sqrt (z * z - g_t * g_u);
- // Equation 2.4.56 [Sun, Yuan 2006]
- return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w));
-}
+ default:
+ case EndpointsCondition::Case4: {
+ // Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t
+ // Equation 2.4.52 [Sun, Yuan 2006]
+ const double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u;
+ const double w = std::sqrt(z * z - g_t * g_u);
+ // Equation 2.4.56 [Sun, Yuan 2006]
+ return a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w);
+ }
+ }
+}
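
All four cases above reuse the same cubic-interpolation minimizer, Equations 2.4.52 and 2.4.56 [Sun, Yuan 2006]. A small, hypothetical sanity check (an illustration, not part of the patch) with phi(a) = (a - 1)^2, whose exact minimizer is a = 1:

#include <cassert>
#include <cmath>

inline void
cubic_minimizer_sketch()
{
  // phi(a) = (a - 1)^2 sampled at the lower endpoint and the trial value.
  const double a_l = 0.0, f_l = 1.0, g_l = -2.0; // phi(0), phi'(0)
  const double a_t = 2.0, f_t = 1.0, g_t = 2.0;  // phi(2), phi'(2)
  const double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
  const double w = std::sqrt(z * z - g_t * g_l);
  const double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
  assert(std::fabs(a_c - 1.0) < 1e-12); // the interpolant recovers the true minimizer
}
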
-template<typename PointSource, typename PointTarget> double
-NormalDistributionsTransform<PointSource, PointTarget>::computeStepLengthMT (const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir, double step_init, double step_max,
- double step_min, double &score, Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
- PointCloudSource &trans_cloud)
+template <typename PointSource, typename PointTarget>
+double
+NormalDistributionsTransform<PointSource, PointTarget>::computeStepLengthMT(
+ const Eigen::Matrix<double, 6, 1>& x,
+ Eigen::Matrix<double, 6, 1>& step_dir,
+ double step_init,
+ double step_max,
+ double step_min,
+ double& score,
+ Eigen::Matrix<double, 6, 1>& score_gradient,
+ Eigen::Matrix<double, 6, 6>& hessian,
+ PointCloudSource& trans_cloud)
{
// Set the value of phi(0), Equation 1.3 [More, Thuente 1994]
- double phi_0 = -score;
+ const double phi_0 = -score;
// Set the value of phi'(0), Equation 1.3 [More, Thuente 1994]
- double d_phi_0 = -(score_gradient.dot (step_dir));
+ double d_phi_0 = -(score_gradient.dot(step_dir));
- Eigen::Matrix<double, 6, 1> x_t;
-
- if (d_phi_0 >= 0)
- {
+ if (d_phi_0 >= 0) {
    // Not a descent direction
- if (d_phi_0 == 0)
+ if (d_phi_0 == 0) {
return 0;
+ }
// Reverse step direction and calculate optimal step.
d_phi_0 *= -1;
step_dir *= -1;
-
}
// The Search Algorithm for T(mu) [More, Thuente 1994]
- int max_step_iterations = 10;
+ const int max_step_iterations = 10;
int step_iterations = 0;
  // Sufficient decrease constant, Equation 1.1 [More, Thuente 1994]
- double mu = 1.e-4;
+ const double mu = 1.e-4;
  // Curvature condition constant, Equation 1.2 [More, Thuente 1994]
- double nu = 0.9;
+ const double nu = 0.9;
// Initial endpoints of Interval I,
double a_l = 0, a_u = 0;
- // Auxiliary function psi is used until I is determined ot be a closed interval, Equation 2.1 [More, Thuente 1994]
- double f_l = auxilaryFunction_PsiMT (a_l, phi_0, phi_0, d_phi_0, mu);
- double g_l = auxilaryFunction_dPsiMT (d_phi_0, d_phi_0, mu);
+  // Auxiliary function psi is used until I is determined to be a closed interval,
+  // Equation 2.1 [More, Thuente 1994]
+ double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu);
+ double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
- double f_u = auxilaryFunction_PsiMT (a_u, phi_0, phi_0, d_phi_0, mu);
- double g_u = auxilaryFunction_dPsiMT (d_phi_0, d_phi_0, mu);
+ double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu);
+ double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
- // Check used to allow More-Thuente step length calculation to be skipped by making step_min == step_max
+ // Check used to allow More-Thuente step length calculation to be skipped by making
+ // step_min == step_max
bool interval_converged = (step_max - step_min) < 0, open_interval = true;
double a_t = step_init;
- a_t = std::min (a_t, step_max);
- a_t = std::max (a_t, step_min);
+ a_t = std::min(a_t, step_max);
+ a_t = std::max(a_t, step_min);
- x_t = x + step_dir * a_t;
+ Eigen::Matrix<double, 6, 1> x_t = x + step_dir * a_t;
- final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float> (x_t (0)), static_cast<float> (x_t (1)), static_cast<float> (x_t (2))) *
- Eigen::AngleAxis<float> (static_cast<float> (x_t (3)), Eigen::Vector3f::UnitX ()) *
- Eigen::AngleAxis<float> (static_cast<float> (x_t (4)), Eigen::Vector3f::UnitY ()) *
- Eigen::AngleAxis<float> (static_cast<float> (x_t (5)), Eigen::Vector3f::UnitZ ())).matrix ();
+ // Convert x_t into matrix form
+ convertTransform(x_t, final_transformation_);
// New transformed point cloud
- transformPointCloud (*input_, trans_cloud, final_transformation_);
+ transformPointCloud(*input_, trans_cloud, final_transformation_);
- // Updates score, gradient and hessian. Hessian calculation is unessisary but testing showed that most step calculations use the
- // initial step suggestion and recalculation the reusable portions of the hessian would intail more computation time.
- score = computeDerivatives (score_gradient, hessian, trans_cloud, x_t, true);
+  // Updates score, gradient and hessian. The hessian calculation is unnecessary but
+  // testing showed that most step calculations use the initial step suggestion and
+  // recalculating the reusable portions of the hessian would entail more computation
+  // time.
+ score = computeDerivatives(score_gradient, hessian, trans_cloud, x_t, true);
// Calculate phi(alpha_t)
double phi_t = -score;
// Calculate phi'(alpha_t)
- double d_phi_t = -(score_gradient.dot (step_dir));
+ double d_phi_t = -(score_gradient.dot(step_dir));
// Calculate psi(alpha_t)
- double psi_t = auxilaryFunction_PsiMT (a_t, phi_t, phi_0, d_phi_0, mu);
+ double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
// Calculate psi'(alpha_t)
- double d_psi_t = auxilaryFunction_dPsiMT (d_phi_t, d_phi_0, mu);
-
- // Iterate until max number of iterations, interval convergance or a value satisfies the sufficient decrease, Equation 1.1, and curvature condition, Equation 1.2 [More, Thuente 1994]
- while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 /*Sufficient Decrease*/ && d_phi_t <= -nu * d_phi_0 /*Curvature Condition*/))
- {
+ double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
+
+  // Iterate until the max number of iterations, interval convergence, or a value
+  // satisfies the sufficient decrease, Equation 1.1, and curvature condition,
+  // Equation 1.2 [More, Thuente 1994]
+ while (!interval_converged && step_iterations < max_step_iterations &&
+ !(psi_t <= 0 /*Sufficient Decrease*/ &&
+ d_phi_t <= -nu * d_phi_0 /*Curvature Condition*/)) {
// Use auxiliary function if interval I is not closed
- if (open_interval)
- {
- a_t = trialValueSelectionMT (a_l, f_l, g_l,
- a_u, f_u, g_u,
- a_t, psi_t, d_psi_t);
+ if (open_interval) {
+ a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
}
- else
- {
- a_t = trialValueSelectionMT (a_l, f_l, g_l,
- a_u, f_u, g_u,
- a_t, phi_t, d_phi_t);
+ else {
+ a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
- a_t = std::min (a_t, step_max);
- a_t = std::max (a_t, step_min);
+ a_t = std::min(a_t, step_max);
+ a_t = std::max(a_t, step_min);
x_t = x + step_dir * a_t;
- final_transformation_ = (Eigen::Translation<float, 3> (static_cast<float> (x_t (0)), static_cast<float> (x_t (1)), static_cast<float> (x_t (2))) *
- Eigen::AngleAxis<float> (static_cast<float> (x_t (3)), Eigen::Vector3f::UnitX ()) *
- Eigen::AngleAxis<float> (static_cast<float> (x_t (4)), Eigen::Vector3f::UnitY ()) *
- Eigen::AngleAxis<float> (static_cast<float> (x_t (5)), Eigen::Vector3f::UnitZ ())).matrix ();
+ // Convert x_t into matrix form
+ convertTransform(x_t, final_transformation_);
// New transformed point cloud
// Done on final cloud to prevent wasted computation
- transformPointCloud (*input_, trans_cloud, final_transformation_);
+ transformPointCloud(*input_, trans_cloud, final_transformation_);
// Updates score, gradient. Values stored to prevent wasted computation.
- score = computeDerivatives (score_gradient, hessian, trans_cloud, x_t, false);
+ score = computeDerivatives(score_gradient, hessian, trans_cloud, x_t, false);
// Calculate phi(alpha_t+)
phi_t = -score;
// Calculate phi'(alpha_t+)
- d_phi_t = -(score_gradient.dot (step_dir));
+ d_phi_t = -(score_gradient.dot(step_dir));
// Calculate psi(alpha_t+)
- psi_t = auxilaryFunction_PsiMT (a_t, phi_t, phi_0, d_phi_0, mu);
+ psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
// Calculate psi'(alpha_t+)
- d_psi_t = auxilaryFunction_dPsiMT (d_phi_t, d_phi_0, mu);
+ d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
// Check if I is now a closed interval
- if (open_interval && (psi_t <= 0 && d_psi_t >= 0))
- {
+ if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) {
open_interval = false;
// Converts f_l and g_l from psi to phi
g_u += mu * d_phi_0;
}
- if (open_interval)
- {
+ if (open_interval) {
// Update interval end points using Updating Algorithm [More, Thuente 1994]
- interval_converged = updateIntervalMT (a_l, f_l, g_l,
- a_u, f_u, g_u,
- a_t, psi_t, d_psi_t);
+ interval_converged =
+ updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
}
- else
- {
- // Update interval end points using Modified Updating Algorithm [More, Thuente 1994]
- interval_converged = updateIntervalMT (a_l, f_l, g_l,
- a_u, f_u, g_u,
- a_t, phi_t, d_phi_t);
+ else {
+ // Update interval end points using Modified Updating Algorithm [More, Thuente
+ // 1994]
+ interval_converged =
+ updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
step_iterations++;
// If inner loop was run then hessian needs to be calculated.
  // Hessian is unnecessary for step length determination but gradients are required
// so derivative and transform data is stored for the next iteration.
- if (step_iterations)
- computeHessian (hessian, trans_cloud, x_t);
+ if (step_iterations) {
+ computeHessian(hessian, trans_cloud);
+ }
- return (a_t);
+ return a_t;
}
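
The convertTransform calls introduced above replace the explicit composition that the removed lines spelled out. A hedged sketch of the expected behaviour, inferred purely from the code it replaces (the real helper is defined elsewhere in this header):

#include <Eigen/Geometry>

inline void
convert_transform_sketch(const Eigen::Matrix<double, 6, 1>& x_t, Eigen::Matrix4f& trans)
{
  // Translation by x_t[0..2] followed by rotations about X, Y and Z by x_t[3..5].
  trans = (Eigen::Translation3f(static_cast<float>(x_t(0)),
                                static_cast<float>(x_t(1)),
                                static_cast<float>(x_t(2))) *
           Eigen::AngleAxisf(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
           Eigen::AngleAxisf(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
           Eigen::AngleAxisf(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ()))
              .matrix();
}
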
} // namespace pcl
#endif // PCL_REGISTRATION_NDT_IMPL_H_
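
To put the functions edited in this file in context, a minimal usage sketch of the public pcl::NormalDistributionsTransform wrapper whose internals they implement; the parameter values are illustrative only, not recommendations from this patch:

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/registration/ndt.h>

inline Eigen::Matrix4f
align_with_ndt(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
               const pcl::PointCloud<pcl::PointXYZ>::Ptr& target,
               const Eigen::Matrix4f& guess = Eigen::Matrix4f::Identity())
{
  pcl::NormalDistributionsTransform<pcl::PointXYZ, pcl::PointXYZ> ndt;
  ndt.setTransformationEpsilon(0.01); // convergence threshold on the transform update
  ndt.setStepSize(0.1);               // maximum More-Thuente step length
  ndt.setResolution(1.0);             // voxel grid resolution of the target NDT
  ndt.setMaximumIterations(35);
  ndt.setInputSource(source);
  ndt.setInputTarget(target);

  pcl::PointCloud<pcl::PointXYZ> aligned;
  ndt.align(aligned, guess); // drives computeDerivatives / computeStepLengthMT above
  return ndt.getFinalTransformation();
}
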
-
/*
-* Software License Agreement (BSD License)
-*
-* Point Cloud Library (PCL) - www.pointclouds.org
-* Copyright (c) 2011-2012, Willow Garage, Inc.
-* Copyright (c) 2012-, Open Perception, Inc.
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-* * Neither the name of the copyright holder(s) nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*
-* $Id$
-*
-*/
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2011-2012, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ */
#ifndef PCL_NDT_2D_IMPL_H_
#define PCL_NDT_2D_IMPL_H_
-#include <pcl/registration/eigen.h>
-#include <pcl/registration/boost.h>
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver, EigenSolver
#include <cmath>
#include <memory>
-namespace pcl
-{
+namespace pcl {
-namespace ndt2d
-{
+namespace ndt2d {
/** \brief Class to store vector value and first and second derivatives
- * (grad vector and hessian matrix), so they can be returned easily from
- * functions
- */
-template<unsigned N=3, typename T=double>
-struct ValueAndDerivatives
-{
- ValueAndDerivatives () : hessian (), grad (), value () {}
+ * (grad vector and hessian matrix), so they can be returned easily from
+ * functions
+ */
+template <unsigned N = 3, typename T = double>
+struct ValueAndDerivatives {
+ ValueAndDerivatives() : hessian(), grad(), value() {}
Eigen::Matrix<T, N, N> hessian;
- Eigen::Matrix<T, N, 1> grad;
+ Eigen::Matrix<T, N, 1> grad;
T value;
- static ValueAndDerivatives<N,T>
- Zero ()
+ static ValueAndDerivatives<N, T>
+ Zero()
{
- ValueAndDerivatives<N,T> r;
- r.hessian = Eigen::Matrix<T, N, N>::Zero ();
- r.grad = Eigen::Matrix<T, N, 1>::Zero ();
- r.value = 0;
+ ValueAndDerivatives<N, T> r;
+ r.hessian = Eigen::Matrix<T, N, N>::Zero();
+ r.grad = Eigen::Matrix<T, N, 1>::Zero();
+ r.value = 0;
return r;
}
- ValueAndDerivatives<N,T>&
- operator+= (ValueAndDerivatives<N,T> const& r)
+ ValueAndDerivatives<N, T>&
+ operator+=(ValueAndDerivatives<N, T> const& r)
{
hessian += r.hessian;
- grad += r.grad;
- value += r.value;
+ grad += r.grad;
+ value += r.value;
return *this;
}
};
/** \brief A normal distribution estimation class.
- *
- * First the indices of of the points from a point cloud that should be
- * modelled by the distribution are added with addIdx (...).
- *
- * Then estimateParams (...) uses the stored point indices to estimate the
- * parameters of a normal distribution, and discards the stored indices.
- *
- * Finally the distriubution, and its derivatives, may be evaluated at any
- * point using test (...).
- */
+ *
+ * First the indices of the points from a point cloud that should be
+ * modelled by the distribution are added with addIdx (...).
+ *
+ * Then estimateParams (...) uses the stored point indices to estimate the
+ * parameters of a normal distribution, and discards the stored indices.
+ *
+ * Finally the distribution, and its derivatives, may be evaluated at any
+ * point using test (...).
+ */
template <typename PointT>
-class NormalDist
-{
+class NormalDist {
using PointCloud = pcl::PointCloud<PointT>;
public:
- NormalDist ()
- : min_n_ (3), n_ (0)
- {
- }
+ NormalDist() : min_n_(3), n_(0) {}
/** \brief Store a point index to use later for estimating distribution parameters.
- * \param[in] i Point index to store
- */
+ * \param[in] i Point index to store
+ */
void
- addIdx (std::size_t i)
+ addIdx(std::size_t i)
{
- pt_indices_.push_back (i);
+ pt_indices_.push_back(i);
}
- /** \brief Estimate the normal distribution parameters given the point indices provided. Memory of point indices is cleared.
- * \param[in] cloud Point cloud corresponding to indices passed to addIdx.
- * \param[in] min_covar_eigvalue_mult Set the smallest eigenvalue to this times the largest.
- */
+  /** \brief Estimate the normal distribution parameters given the point indices
+   * provided. Memory of point indices is cleared.
+   * \param[in] cloud Point cloud corresponding to indices passed to addIdx.
+   * \param[in] min_covar_eigvalue_mult Set the smallest eigenvalue to this times the
+   * largest.
+   */
void
- estimateParams (const PointCloud& cloud, double min_covar_eigvalue_mult = 0.001)
+ estimateParams(const PointCloud& cloud, double min_covar_eigvalue_mult = 0.001)
{
- Eigen::Vector2d sx = Eigen::Vector2d::Zero ();
- Eigen::Matrix2d sxx = Eigen::Matrix2d::Zero ();
-
- for (auto i = pt_indices_.cbegin (); i != pt_indices_.cend (); i++)
- {
- Eigen::Vector2d p (cloud[*i]. x, cloud[*i]. y);
- sx += p;
- sxx += p * p.transpose ();
+ Eigen::Vector2d sx = Eigen::Vector2d::Zero();
+ Eigen::Matrix2d sxx = Eigen::Matrix2d::Zero();
+
+ for (auto i = pt_indices_.cbegin(); i != pt_indices_.cend(); i++) {
+ Eigen::Vector2d p(cloud[*i].x, cloud[*i].y);
+ sx += p;
+ sxx += p * p.transpose();
}
- n_ = pt_indices_.size ();
+ n_ = pt_indices_.size();
- if (n_ >= min_n_)
- {
- mean_ = sx / static_cast<double> (n_);
+ if (n_ >= min_n_) {
+ mean_ = sx / static_cast<double>(n_);
// Using maximum likelihood estimation as in the original paper
- Eigen::Matrix2d covar = (sxx - 2 * (sx * mean_.transpose ())) / static_cast<double> (n_) + mean_ * mean_.transpose ();
-
- Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> solver (covar);
- if (solver.eigenvalues ()[0] < min_covar_eigvalue_mult * solver.eigenvalues ()[1])
- {
- PCL_DEBUG ("[pcl::NormalDist::estimateParams] NDT normal fit: adjusting eigenvalue %f\n", solver.eigenvalues ()[0]);
- Eigen::Matrix2d l = solver.eigenvalues ().asDiagonal ();
- Eigen::Matrix2d q = solver.eigenvectors ();
+ Eigen::Matrix2d covar =
+ (sxx - 2 * (sx * mean_.transpose())) / static_cast<double>(n_) +
+ mean_ * mean_.transpose();
+
+ Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> solver(covar);
+ if (solver.eigenvalues()[0] < min_covar_eigvalue_mult * solver.eigenvalues()[1]) {
+ PCL_DEBUG("[pcl::NormalDist::estimateParams] NDT normal fit: adjusting "
+ "eigenvalue %f\n",
+ solver.eigenvalues()[0]);
+ Eigen::Matrix2d l = solver.eigenvalues().asDiagonal();
+ Eigen::Matrix2d q = solver.eigenvectors();
// set minimum smallest eigenvalue:
- l (0,0) = l (1,1) * min_covar_eigvalue_mult;
- covar = q * l * q.transpose ();
+ l(0, 0) = l(1, 1) * min_covar_eigvalue_mult;
+ covar = q * l * q.transpose();
}
- covar_inv_ = covar.inverse ();
+ covar_inv_ = covar.inverse();
}
- pt_indices_.clear ();
+ pt_indices_.clear();
}
- /** \brief Return the 'score' (denormalised likelihood) and derivatives of score of the point p given this distribution.
- * \param[in] transformed_pt Location to evaluate at.
- * \param[in] cos_theta sin(theta) of the current rotation angle of rigid transformation: to avoid repeated evaluation
- * \param[in] sin_theta cos(theta) of the current rotation angle of rigid transformation: to avoid repeated evaluation
- * estimateParams must have been called after at least three points were provided, or this will return zero.
- *
- */
- ValueAndDerivatives<3,double>
- test (const PointT& transformed_pt, const double& cos_theta, const double& sin_theta) const
+  /** \brief Return the 'score' (denormalised likelihood) and derivatives of score of
+   * the point p given this distribution.
+   * \param[in] transformed_pt Location to evaluate at.
+   * \param[in] cos_theta cos(theta) of the current rotation angle of the rigid
+   * transformation: to avoid repeated evaluation
+   * \param[in] sin_theta sin(theta) of the current rotation angle of the rigid
+   * transformation: to avoid repeated evaluation
+   * estimateParams must have been called after at least three points were provided,
+   * or this will return zero.
+   */
+ ValueAndDerivatives<3, double>
+ test(const PointT& transformed_pt,
+ const double& cos_theta,
+ const double& sin_theta) const
{
if (n_ < min_n_)
- return ValueAndDerivatives<3,double>::Zero ();
+ return ValueAndDerivatives<3, double>::Zero();
- ValueAndDerivatives<3,double> r;
+ ValueAndDerivatives<3, double> r;
const double x = transformed_pt.x;
const double y = transformed_pt.y;
- const Eigen::Vector2d p_xy (transformed_pt.x, transformed_pt.y);
+ const Eigen::Vector2d p_xy(transformed_pt.x, transformed_pt.y);
const Eigen::Vector2d q = p_xy - mean_;
- const Eigen::RowVector2d qt_cvi (q.transpose () * covar_inv_);
- const double exp_qt_cvi_q = std::exp (-0.5 * double (qt_cvi * q));
+ const Eigen::RowVector2d qt_cvi(q.transpose() * covar_inv_);
+ const double exp_qt_cvi_q = std::exp(-0.5 * double(qt_cvi * q));
r.value = -exp_qt_cvi_q;
Eigen::Matrix<double, 2, 3> jacobian;
- jacobian <<
- 1, 0, -(x * sin_theta + y*cos_theta),
- 0, 1, x * cos_theta - y*sin_theta;
+ jacobian << 1, 0, -(x * sin_theta + y * cos_theta), 0, 1,
+ x * cos_theta - y * sin_theta;
for (std::size_t i = 0; i < 3; i++)
- r.grad[i] = double (qt_cvi * jacobian.col (i)) * exp_qt_cvi_q;
+ r.grad[i] = double(qt_cvi * jacobian.col(i)) * exp_qt_cvi_q;
// second derivative only for i == j == 2:
- const Eigen::Vector2d d2q_didj (
- y * sin_theta - x*cos_theta,
- -(x * sin_theta + y*cos_theta)
- );
+ const Eigen::Vector2d d2q_didj(y * sin_theta - x * cos_theta,
+ -(x * sin_theta + y * cos_theta));
for (std::size_t i = 0; i < 3; i++)
for (std::size_t j = 0; j < 3; j++)
- r.hessian (i,j) = -exp_qt_cvi_q * (
- double (-qt_cvi*jacobian.col (i)) * double (-qt_cvi*jacobian.col (j)) +
- (-qt_cvi * ((i==2 && j==2)? d2q_didj : Eigen::Vector2d::Zero ())) +
- (-jacobian.col (j).transpose () * covar_inv_ * jacobian.col (i))
- );
+ r.hessian(i, j) =
+ -exp_qt_cvi_q *
+ (double(-qt_cvi * jacobian.col(i)) * double(-qt_cvi * jacobian.col(j)) +
+ (-qt_cvi * ((i == 2 && j == 2) ? d2q_didj : Eigen::Vector2d::Zero())) +
+ (-jacobian.col(j).transpose() * covar_inv_ * jacobian.col(i)));
return r;
}
};
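
A side note on estimateParams above (my derivation, an addition rather than text from the patch): with $s_x=\sum_i x_i$, $S=\sum_i x_i x_i^{\top}$ and $\mu=s_x/n$, the single-pass expression (sxx - 2 * (sx * mean_.transpose())) / n + mean_ * mean_.transpose() is exactly the maximum-likelihood covariance, because $s_x/n=\mu$:

\[
\frac{1}{n}\sum_i (x_i-\mu)(x_i-\mu)^{\top}
= \frac{S}{n}-\mu\mu^{\top}
= \frac{S-2\,s_x\mu^{\top}}{n}+\mu\mu^{\top}.
\]
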
/** \brief Build a set of normal distributions modelling a 2D point cloud,
- * and provide the value and derivatives of the model at any point via the
- * test (...) function.
- */
+ * and provide the value and derivatives of the model at any point via the
+ * test (...) function.
+ */
template <typename PointT>
-class NDTSingleGrid: public boost::noncopyable
-{
+class NDTSingleGrid : public boost::noncopyable {
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudConstPtr = typename PointCloud::ConstPtr;
using NormalDist = pcl::ndt2d::NormalDist<PointT>;
public:
- NDTSingleGrid (PointCloudConstPtr cloud,
- const Eigen::Vector2f& about,
- const Eigen::Vector2f& extent,
- const Eigen::Vector2f& step)
- : min_ (about - extent), max_ (min_ + 2*extent), step_ (step),
- cells_ ((max_[0]-min_[0]) / step_[0],
- (max_[1]-min_[1]) / step_[1]),
- normal_distributions_ (cells_[0], cells_[1])
+ NDTSingleGrid(PointCloudConstPtr cloud,
+ const Eigen::Vector2f& about,
+ const Eigen::Vector2f& extent,
+ const Eigen::Vector2f& step)
+ : min_(about - extent)
+ , max_(min_ + 2 * extent)
+ , step_(step)
+ , cells_((max_[0] - min_[0]) / step_[0], (max_[1] - min_[1]) / step_[1])
+ , normal_distributions_(cells_[0], cells_[1])
{
// sort through all points, assigning them to distributions:
std::size_t used_points = 0;
- for (std::size_t i = 0; i < cloud->size (); i++)
- if (NormalDist* n = normalDistForPoint (cloud->at (i)))
- {
- n->addIdx (i);
- used_points++;
- }
+ for (std::size_t i = 0; i < cloud->size(); i++)
+ if (NormalDist* n = normalDistForPoint(cloud->at(i))) {
+ n->addIdx(i);
+ used_points++;
+ }
- PCL_DEBUG ("[pcl::NDTSingleGrid] NDT single grid %dx%d using %d/%d points\n", cells_[0], cells_[1], used_points, cloud->size ());
+ PCL_DEBUG("[pcl::NDTSingleGrid] NDT single grid %dx%d using %d/%d points\n",
+ cells_[0],
+ cells_[1],
+ used_points,
+ cloud->size());
// then bake the distributions such that they approximate the
// points (and throw away memory of the points)
for (int x = 0; x < cells_[0]; x++)
for (int y = 0; y < cells_[1]; y++)
- normal_distributions_.coeffRef (x,y).estimateParams (*cloud);
+ normal_distributions_.coeffRef(x, y).estimateParams(*cloud);
}
- /** \brief Return the 'score' (denormalised likelihood) and derivatives of score of the point p given this distribution.
- * \param[in] transformed_pt Location to evaluate at.
- * \param[in] cos_theta sin(theta) of the current rotation angle of rigid transformation: to avoid repeated evaluation
- * \param[in] sin_theta cos(theta) of the current rotation angle of rigid transformation: to avoid repeated evaluation
- */
- ValueAndDerivatives<3,double>
- test (const PointT& transformed_pt, const double& cos_theta, const double& sin_theta) const
+  /** \brief Return the 'score' (denormalised likelihood) and derivatives of score of
+   * the point p given this distribution.
+   * \param[in] transformed_pt Location to evaluate at.
+   * \param[in] cos_theta cos(theta) of the current rotation angle of the rigid
+   * transformation: to avoid repeated evaluation
+   * \param[in] sin_theta sin(theta) of the current rotation angle of the rigid
+   * transformation: to avoid repeated evaluation
+   */
+ ValueAndDerivatives<3, double>
+ test(const PointT& transformed_pt,
+ const double& cos_theta,
+ const double& sin_theta) const
{
- const NormalDist* n = normalDistForPoint (transformed_pt);
+ const NormalDist* n = normalDistForPoint(transformed_pt);
// index is in grid, return score from the normal distribution from
// the correct part of the grid:
if (n)
- return n->test (transformed_pt, cos_theta, sin_theta);
- return ValueAndDerivatives<3,double>::Zero ();
+ return n->test(transformed_pt, cos_theta, sin_theta);
+ return ValueAndDerivatives<3, double>::Zero();
}
protected:
/** \brief Return the normal distribution covering the location of point p
- * \param[in] p a point
- */
+ * \param[in] p a point
+ */
NormalDist*
- normalDistForPoint (PointT const& p) const
+ normalDistForPoint(PointT const& p) const
{
// this would be neater in 3d...
Eigen::Vector2f idxf;
for (std::size_t i = 0; i < 2; i++)
- idxf[i] = (p.getVector3fMap ()[i] - min_[i]) / step_[i];
- Eigen::Vector2i idxi = idxf.cast<int> ();
+ idxf[i] = (p.getVector3fMap()[i] - min_[i]) / step_[i];
+ Eigen::Vector2i idxi = idxf.cast<int>();
for (std::size_t i = 0; i < 2; i++)
if (idxi[i] >= cells_[i] || idxi[i] < 0)
return nullptr;
// const cast to avoid duplicating this function in const and
// non-const variants...
- return const_cast<NormalDist*> (&normal_distributions_.coeffRef (idxi[0], idxi[1]));
+ return const_cast<NormalDist*>(&normal_distributions_.coeffRef(idxi[0], idxi[1]));
}
Eigen::Vector2f min_;
};
/** \brief Build a Normal Distributions Transform of a 2D point cloud. This
- * consists of the sum of four overlapping models of the original points
- * with normal distributions.
- * The value and derivatives of the model at any point can be evaluated
- * with the test (...) function.
- */
+ * consists of the sum of four overlapping models of the original points
+ * with normal distributions.
+ * The value and derivatives of the model at any point can be evaluated
+ * with the test (...) function.
+ */
template <typename PointT>
-class NDT2D: public boost::noncopyable
-{
+class NDT2D : public boost::noncopyable {
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudConstPtr = typename PointCloud::ConstPtr;
using SingleGrid = NDTSingleGrid<PointT>;
public:
/** \brief
- * \param[in] cloud the input point cloud
- * \param[in] about Centre of the grid for normal distributions model
- * \param[in] extent Extent of grid for normal distributions model
- * \param[in] step Size of region that each normal distribution will model
- */
- NDT2D (PointCloudConstPtr cloud,
- const Eigen::Vector2f& about,
- const Eigen::Vector2f& extent,
- const Eigen::Vector2f& step)
+ * \param[in] cloud the input point cloud
+ * \param[in] about Centre of the grid for normal distributions model
+ * \param[in] extent Extent of grid for normal distributions model
+ * \param[in] step Size of region that each normal distribution will model
+ */
+ NDT2D(PointCloudConstPtr cloud,
+ const Eigen::Vector2f& about,
+ const Eigen::Vector2f& extent,
+ const Eigen::Vector2f& step)
{
- Eigen::Vector2f dx (step[0]/2, 0);
- Eigen::Vector2f dy (0, step[1]/2);
- single_grids_[0].reset(new SingleGrid (cloud, about, extent, step));
- single_grids_[1].reset(new SingleGrid (cloud, about +dx, extent, step));
- single_grids_[2].reset(new SingleGrid (cloud, about +dy, extent, step));
- single_grids_[3].reset(new SingleGrid (cloud, about +dx+dy, extent, step));
+ Eigen::Vector2f dx(step[0] / 2, 0);
+ Eigen::Vector2f dy(0, step[1] / 2);
+ single_grids_[0].reset(new SingleGrid(cloud, about, extent, step));
+ single_grids_[1].reset(new SingleGrid(cloud, about + dx, extent, step));
+ single_grids_[2].reset(new SingleGrid(cloud, about + dy, extent, step));
+ single_grids_[3].reset(new SingleGrid(cloud, about + dx + dy, extent, step));
}
- /** \brief Return the 'score' (denormalised likelihood) and derivatives of score of the point p given this distribution.
- * \param[in] transformed_pt Location to evaluate at.
- * \param[in] cos_theta sin(theta) of the current rotation angle of rigid transformation: to avoid repeated evaluation
- * \param[in] sin_theta cos(theta) of the current rotation angle of rigid transformation: to avoid repeated evaluation
- */
- ValueAndDerivatives<3,double>
- test (const PointT& transformed_pt, const double& cos_theta, const double& sin_theta) const
+  /** \brief Return the 'score' (denormalised likelihood) and derivatives of score of
+   * the point p given this distribution.
+   * \param[in] transformed_pt Location to evaluate at.
+   * \param[in] cos_theta cos(theta) of the current rotation angle of the rigid
+   * transformation: to avoid repeated evaluation
+   * \param[in] sin_theta sin(theta) of the current rotation angle of the rigid
+   * transformation: to avoid repeated evaluation
+   */
+ ValueAndDerivatives<3, double>
+ test(const PointT& transformed_pt,
+ const double& cos_theta,
+ const double& sin_theta) const
{
- ValueAndDerivatives<3,double> r = ValueAndDerivatives<3,double>::Zero ();
- for (const auto &single_grid : single_grids_)
- r += single_grid->test (transformed_pt, cos_theta, sin_theta);
+ ValueAndDerivatives<3, double> r = ValueAndDerivatives<3, double>::Zero();
+ for (const auto& single_grid : single_grids_)
+ r += single_grid->test(transformed_pt, cos_theta, sin_theta);
return r;
}
} // namespace ndt2d
} // namespace pcl
-
-namespace Eigen
-{
+namespace Eigen {
/* This NumTraits specialisation is necessary because NormalDist is used as
-* the element type of an Eigen Matrix.
-*/
-template<typename PointT>
-struct NumTraits<pcl::ndt2d::NormalDist<PointT> >
-{
+ * the element type of an Eigen Matrix.
+ */
+template <typename PointT>
+struct NumTraits<pcl::ndt2d::NormalDist<PointT>> {
using Real = double;
using Literal = double;
- static Real dummy_precision () { return 1.0; }
- enum
+ static Real
+ dummy_precision()
{
+ return 1.0;
+ }
+ enum {
IsComplex = 0,
IsInteger = 0,
IsSigned = 0,
} // namespace Eigen
+namespace pcl {
-namespace pcl
-{
-
-template <typename PointSource, typename PointTarget> void
-NormalDistributionsTransform2D<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f &guess)
+template <typename PointSource, typename PointTarget>
+void
+NormalDistributionsTransform2D<PointSource, PointTarget>::computeTransformation(
+ PointCloudSource& output, const Eigen::Matrix4f& guess)
{
PointCloudSource intm_cloud = output;
nr_iterations_ = 0;
converged_ = false;
- if (guess != Eigen::Matrix4f::Identity ())
- {
+ if (guess != Eigen::Matrix4f::Identity()) {
transformation_ = guess;
- transformPointCloud (output, intm_cloud, transformation_);
+ transformPointCloud(output, intm_cloud, transformation_);
}
// build Normal Distribution Transform of target cloud:
- ndt2d::NDT2D<PointTarget> target_ndt (target_, grid_centre_, grid_extent_, grid_step_);
+ ndt2d::NDT2D<PointTarget> target_ndt(target_, grid_centre_, grid_extent_, grid_step_);
// can't seem to use .block<> () member function on transformation_
// directly... gcc bug?
Eigen::Matrix4f& transformation = transformation_;
-
// work with x translation, y translation and z rotation: extending to 3D
// would be some tricky maths, but not impossible.
- const Eigen::Matrix3f initial_rot (transformation.block<3,3> (0,0));
- const Eigen::Vector3f rot_x (initial_rot*Eigen::Vector3f::UnitX ());
- const double z_rotation = std::atan2 (rot_x[1], rot_x[0]);
+ const Eigen::Matrix3f initial_rot(transformation.block<3, 3>(0, 0));
+ const Eigen::Vector3f rot_x(initial_rot * Eigen::Vector3f::UnitX());
+ const double z_rotation = std::atan2(rot_x[1], rot_x[0]);
- Eigen::Vector3d xytheta_transformation (
- transformation (0,3),
- transformation (1,3),
- z_rotation
- );
+ Eigen::Vector3d xytheta_transformation(
+ transformation(0, 3), transformation(1, 3), z_rotation);
- while (!converged_)
- {
- const double cos_theta = std::cos (xytheta_transformation[2]);
- const double sin_theta = std::sin (xytheta_transformation[2]);
+ while (!converged_) {
+ const double cos_theta = std::cos(xytheta_transformation[2]);
+ const double sin_theta = std::sin(xytheta_transformation[2]);
previous_transformation_ = transformation;
- ndt2d::ValueAndDerivatives<3, double> score = ndt2d::ValueAndDerivatives<3, double>::Zero ();
- for (std::size_t i = 0; i < intm_cloud.size (); i++)
- score += target_ndt.test (intm_cloud[i], cos_theta, sin_theta);
+ ndt2d::ValueAndDerivatives<3, double> score =
+ ndt2d::ValueAndDerivatives<3, double>::Zero();
+ for (std::size_t i = 0; i < intm_cloud.size(); i++)
+ score += target_ndt.test(intm_cloud[i], cos_theta, sin_theta);
- PCL_DEBUG ("[pcl::NormalDistributionsTransform2D::computeTransformation] NDT score %f (x=%f,y=%f,r=%f)\n",
- float (score.value), xytheta_transformation[0], xytheta_transformation[1], xytheta_transformation[2]
- );
+ PCL_DEBUG("[pcl::NormalDistributionsTransform2D::computeTransformation] NDT score "
+ "%f (x=%f,y=%f,r=%f)\n",
+ float(score.value),
+ xytheta_transformation[0],
+ xytheta_transformation[1],
+ xytheta_transformation[2]);
- if (score.value != 0)
- {
+ if (score.value != 0) {
// test for positive definiteness, and adjust to ensure it if necessary:
Eigen::EigenSolver<Eigen::Matrix3d> solver;
- solver.compute (score.hessian, false);
+ solver.compute(score.hessian, false);
double min_eigenvalue = 0;
- for (int i = 0; i <3; i++)
- if (solver.eigenvalues ()[i].real () < min_eigenvalue)
- min_eigenvalue = solver.eigenvalues ()[i].real ();
+ for (int i = 0; i < 3; i++)
+ if (solver.eigenvalues()[i].real() < min_eigenvalue)
+ min_eigenvalue = solver.eigenvalues()[i].real();
// ensure "safe" positive definiteness: this is a detail missing
// from the original paper
- if (min_eigenvalue < 0)
- {
+ if (min_eigenvalue < 0) {
double lambda = 1.1 * min_eigenvalue - 1;
- score.hessian += Eigen::Vector3d (-lambda, -lambda, -lambda).asDiagonal ();
- solver.compute (score.hessian, false);
- PCL_DEBUG ("[pcl::NormalDistributionsTransform2D::computeTransformation] adjust hessian: %f: new eigenvalues:%f %f %f\n",
- float (lambda),
- solver.eigenvalues ()[0].real (),
- solver.eigenvalues ()[1].real (),
- solver.eigenvalues ()[2].real ()
- );
+ score.hessian += Eigen::Vector3d(-lambda, -lambda, -lambda).asDiagonal();
+ solver.compute(score.hessian, false);
+ PCL_DEBUG("[pcl::NormalDistributionsTransform2D::computeTransformation] adjust "
+ "hessian: %f: new eigenvalues:%f %f %f\n",
+ float(lambda),
+ solver.eigenvalues()[0].real(),
+ solver.eigenvalues()[1].real(),
+ solver.eigenvalues()[2].real());
}
- assert (solver.eigenvalues ()[0].real () >= 0 &&
- solver.eigenvalues ()[1].real () >= 0 &&
- solver.eigenvalues ()[2].real () >= 0);
+ assert(solver.eigenvalues()[0].real() >= 0 &&
+ solver.eigenvalues()[1].real() >= 0 &&
+ solver.eigenvalues()[2].real() >= 0);
- Eigen::Vector3d delta_transformation (-score.hessian.inverse () * score.grad);
- Eigen::Vector3d new_transformation = xytheta_transformation + newton_lambda_.cwiseProduct (delta_transformation);
+ Eigen::Vector3d delta_transformation(-score.hessian.inverse() * score.grad);
+ Eigen::Vector3d new_transformation =
+ xytheta_transformation + newton_lambda_.cwiseProduct(delta_transformation);
xytheta_transformation = new_transformation;
// update transformation matrix from x, y, theta:
- transformation.block<3,3> (0,0).matrix () = Eigen::Matrix3f (Eigen::AngleAxisf (static_cast<float> (xytheta_transformation[2]), Eigen::Vector3f::UnitZ ()));
- transformation.block<3,1> (0,3).matrix () = Eigen::Vector3f (static_cast<float> (xytheta_transformation[0]), static_cast<float> (xytheta_transformation[1]), 0.0f);
-
- //std::cout << "new transformation:\n" << transformation << std::endl;
+ transformation.block<3, 3>(0, 0).matrix() = Eigen::Matrix3f(Eigen::AngleAxisf(
+ static_cast<float>(xytheta_transformation[2]), Eigen::Vector3f::UnitZ()));
+ transformation.block<3, 1>(0, 3).matrix() =
+ Eigen::Vector3f(static_cast<float>(xytheta_transformation[0]),
+ static_cast<float>(xytheta_transformation[1]),
+ 0.0f);
+
+ // std::cout << "new transformation:\n" << transformation << std::endl;
}
- else
- {
- PCL_ERROR ("[pcl::NormalDistributionsTransform2D::computeTransformation] no overlap: try increasing the size or reducing the step of the grid\n");
+ else {
+ PCL_ERROR("[pcl::NormalDistributionsTransform2D::computeTransformation] no "
+ "overlap: try increasing the size or reducing the step of the grid\n");
break;
}
- transformPointCloud (output, intm_cloud, transformation);
+ transformPointCloud(output, intm_cloud, transformation);
nr_iterations_++;
if (update_visualizer_)
- update_visualizer_ (output, *indices_, *target_, *indices_);
+ update_visualizer_(output, *indices_, *target_, *indices_);
- //std::cout << "eps=" << std::abs ((transformation - previous_transformation_).sum ()) << std::endl;
+ // std::cout << "eps=" << std::abs ((transformation - previous_transformation_).sum
+ // ()) << std::endl;
- Eigen::Matrix4f transformation_delta = transformation.inverse() * previous_transformation_;
- double cos_angle = 0.5 * (transformation_delta.coeff (0, 0) + transformation_delta.coeff (1, 1) + transformation_delta.coeff (2, 2) - 1);
- double translation_sqr = transformation_delta.coeff (0, 3) * transformation_delta.coeff (0, 3) +
- transformation_delta.coeff (1, 3) * transformation_delta.coeff (1, 3) +
- transformation_delta.coeff (2, 3) * transformation_delta.coeff (2, 3);
+ Eigen::Matrix4f transformation_delta =
+ transformation.inverse() * previous_transformation_;
+ double cos_angle =
+ 0.5 * (transformation_delta.coeff(0, 0) + transformation_delta.coeff(1, 1) +
+ transformation_delta.coeff(2, 2) - 1);
+ double translation_sqr =
+ transformation_delta.coeff(0, 3) * transformation_delta.coeff(0, 3) +
+ transformation_delta.coeff(1, 3) * transformation_delta.coeff(1, 3) +
+ transformation_delta.coeff(2, 3) * transformation_delta.coeff(2, 3);
if (nr_iterations_ >= max_iterations_ ||
- ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) && (transformation_rotation_epsilon_ > 0 && cos_angle >= transformation_rotation_epsilon_)) ||
- ((transformation_epsilon_ <= 0) && (transformation_rotation_epsilon_ > 0 && cos_angle >= transformation_rotation_epsilon_)) ||
- ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) && (transformation_rotation_epsilon_ <= 0)))
- {
+ ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) &&
+ (transformation_rotation_epsilon_ > 0 &&
+ cos_angle >= transformation_rotation_epsilon_)) ||
+ ((transformation_epsilon_ <= 0) &&
+ (transformation_rotation_epsilon_ > 0 &&
+ cos_angle >= transformation_rotation_epsilon_)) ||
+ ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) &&
+ (transformation_rotation_epsilon_ <= 0))) {
converged_ = true;
}
}
} // namespace pcl
-#endif // PCL_NDT_2D_IMPL_H_
-
+#endif // PCL_NDT_2D_IMPL_H_
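Editorial note: the nested convergence condition in computeTransformation above is easier to read when each epsilon criterion is treated as "disabled unless its threshold is positive". A minimal equivalent sketch with illustrative names only (not part of PCL or of this changeset):

// Equivalent reading of the convergence test above. A threshold <= 0 disables that
// criterion; convergence by epsilon requires at least one criterion to be enabled and
// every enabled criterion to be satisfied.
inline bool
hasConverged(int nr_iterations, int max_iterations,
             double translation_sqr, double cos_angle,
             double transformation_epsilon, double rotation_epsilon)
{
  const bool translation_ok =
      transformation_epsilon <= 0 || translation_sqr <= transformation_epsilon;
  const bool rotation_ok = rotation_epsilon <= 0 || cos_angle >= rotation_epsilon;
  const bool any_enabled = transformation_epsilon > 0 || rotation_epsilon > 0;
  return nr_iterations >= max_iterations ||
         (any_enabled && translation_ok && rotation_ok);
}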
#ifndef PCL_REGISTRATION_IMPL_PAIRWISE_GRAPH_REGISTRATION_HPP_
#define PCL_REGISTRATION_IMPL_PAIRWISE_GRAPH_REGISTRATION_HPP_
+namespace pcl {
-namespace pcl
+template <typename GraphT, typename PointT>
+void
+PairwiseGraphRegistration<GraphT, PointT>::computeRegistration()
{
-
-template <typename GraphT, typename PointT> void
-PairwiseGraphRegistration<GraphT, PointT>::computeRegistration ()
-{
- if (!registration_method_)
- {
- PCL_ERROR ("[pcl::PairwiseGraphRegistration::computeRegistration] No registration method set!\n");
+ if (!registration_method_) {
+ PCL_ERROR("[pcl::PairwiseGraphRegistration::computeRegistration] No registration "
+ "method set!\n");
return;
}
- typename std::vector<GraphHandlerVertex>::iterator last_vx_it = last_vertices_.begin ();
- if (last_aligned_vertex_ == boost::graph_traits<GraphT>::null_vertex ())
- {
+ typename std::vector<GraphHandlerVertex>::iterator last_vx_it =
+ last_vertices_.begin();
+ if (last_aligned_vertex_ == boost::graph_traits<GraphT>::null_vertex()) {
last_aligned_vertex_ = *last_vx_it;
++last_vx_it;
}
pcl::PointCloud<PointT> fake_cloud;
- registration_method_->setInputTarget (boost::get_cloud<PointT> (last_aligned_vertex_, *(graph_handler_->getGraph ())));
- for(; last_vx_it < last_vertices_.end (); ++last_vx_it)
- {
- registration_method_->setInputCloud (boost::get_cloud<PointT> (*last_vx_it, *(graph_handler_->getGraph ())));
+ registration_method_->setInputTarget(
+ boost::get_cloud<PointT>(last_aligned_vertex_, *(graph_handler_->getGraph())));
+ for (; last_vx_it < last_vertices_.end(); ++last_vx_it) {
+ registration_method_->setInputCloud(
+ boost::get_cloud<PointT>(*last_vx_it, *(graph_handler_->getGraph())));
- const Eigen::Matrix4f last_aligned_vertex_pose = boost::get_pose (last_aligned_vertex_, *(graph_handler_->getGraph ()));
- if (!incremental_)
- {
- const Eigen::Matrix4f guess = last_aligned_vertex_pose.transpose () * boost::get_pose (*last_vx_it, *(graph_handler_->getGraph ()));
- registration_method_->align (fake_cloud, guess);
- } else
- registration_method_->align (fake_cloud);
+ const Eigen::Matrix4f last_aligned_vertex_pose =
+ boost::get_pose(last_aligned_vertex_, *(graph_handler_->getGraph()));
+ if (!incremental_) {
+ const Eigen::Matrix4f guess =
+ last_aligned_vertex_pose.transpose() *
+ boost::get_pose(*last_vx_it, *(graph_handler_->getGraph()));
+ registration_method_->align(fake_cloud, guess);
+ }
+ else
+ registration_method_->align(fake_cloud);
- const Eigen::Matrix4f global_ref_final_tr = last_aligned_vertex_pose * registration_method_->getFinalTransformation ();
- boost::set_estimate<PointT> (*last_vx_it, global_ref_final_tr, *(graph_handler_->getGraph ()));
+ const Eigen::Matrix4f global_ref_final_tr =
+ last_aligned_vertex_pose * registration_method_->getFinalTransformation();
+ boost::set_estimate<PointT>(
+ *last_vx_it, global_ref_final_tr, *(graph_handler_->getGraph()));
last_aligned_vertex_ = *last_vx_it;
- registration_method_->setInputTarget (boost::get_cloud<PointT> (last_aligned_vertex_, *(graph_handler_->getGraph ())));
+ registration_method_->setInputTarget(
+ boost::get_cloud<PointT>(last_aligned_vertex_, *(graph_handler_->getGraph())));
}
}
} // namespace pcl
-#endif //PCL_REGISTRATION_IMPL_PAIRWISE_GRAPH_REGISTRATION_HPP_
-
+#endif // PCL_REGISTRATION_IMPL_PAIRWISE_GRAPH_REGISTRATION_HPP_
*
*/
-
#ifndef PCL_REGISTRATION_IMPL_PPF_REGISTRATION_H_
#define PCL_REGISTRATION_IMPL_PPF_REGISTRATION_H_
-#include <pcl/registration/ppf_registration.h>
-#include <pcl/features/ppf.h>
#include <pcl/common/transforms.h>
-
#include <pcl/features/pfh.h>
+#include <pcl/features/pfh_tools.h> // for computePairFeatures
+#include <pcl/features/ppf.h>
+#include <pcl/registration/ppf_registration.h>
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget> void
-pcl::PPFRegistration<PointSource, PointTarget>::setInputTarget (const PointCloudTargetConstPtr &cloud)
+template <typename PointSource, typename PointTarget>
+void
+pcl::PPFRegistration<PointSource, PointTarget>::setInputTarget(
+ const PointCloudTargetConstPtr& cloud)
{
- Registration<PointSource, PointTarget>::setInputTarget (cloud);
+ Registration<PointSource, PointTarget>::setInputTarget(cloud);
- scene_search_tree_ = typename pcl::KdTreeFLANN<PointTarget>::Ptr (new pcl::KdTreeFLANN<PointTarget>);
- scene_search_tree_->setInputCloud (target_);
+ scene_search_tree_ =
+ typename pcl::KdTreeFLANN<PointTarget>::Ptr(new pcl::KdTreeFLANN<PointTarget>);
+ scene_search_tree_->setInputCloud(target_);
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget> void
-pcl::PPFRegistration<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
+template <typename PointSource, typename PointTarget>
+void
+pcl::PPFRegistration<PointSource, PointTarget>::computeTransformation(
+ PointCloudSource& output, const Eigen::Matrix4f& guess)
{
- if (!search_method_)
- {
- PCL_ERROR("[pcl::PPFRegistration::computeTransformation] Search method not set - skipping computeTransformation!\n");
+ if (!search_method_) {
+ PCL_ERROR("[pcl::PPFRegistration::computeTransformation] Search method not set - "
+ "skipping computeTransformation!\n");
return;
}
- if (guess != Eigen::Matrix4f::Identity ())
- {
- PCL_ERROR("[pcl::PPFRegistration::computeTransformation] setting initial transform (guess) not implemented!\n");
+ if (guess != Eigen::Matrix4f::Identity()) {
+ PCL_ERROR("[pcl::PPFRegistration::computeTransformation] setting initial transform "
+ "(guess) not implemented!\n");
}
const auto aux_size = static_cast<std::size_t>(
const std::vector<unsigned int> tmp_vec(aux_size, 0);
std::vector<std::vector<unsigned int>> accumulator_array(input_->size(), tmp_vec);
- PCL_INFO ("Accumulator array size: %u x %u.\n", accumulator_array.size (), accumulator_array.back ().size ());
+ PCL_INFO("Accumulator array size: %u x %u.\n",
+ accumulator_array.size(),
+ accumulator_array.back().size());
PoseWithVotesList voted_poses;
- // Consider every <scene_reference_point_sampling_rate>-th point as the reference point => fix s_r
+ // Consider every <scene_reference_point_sampling_rate>-th point as the reference
+ // point => fix s_r
float f1, f2, f3, f4;
- for (std::size_t scene_reference_index = 0; scene_reference_index < target_->size (); scene_reference_index += scene_reference_point_sampling_rate_)
- {
- Eigen::Vector3f scene_reference_point = (*target_)[scene_reference_index].getVector3fMap (),
- scene_reference_normal = (*target_)[scene_reference_index].getNormalVector3fMap ();
-
- float rotation_angle_sg = std::acos (scene_reference_normal.dot (Eigen::Vector3f::UnitX ()));
- bool parallel_to_x_sg = (scene_reference_normal.y() == 0.0f && scene_reference_normal.z() == 0.0f);
- Eigen::Vector3f rotation_axis_sg = (parallel_to_x_sg)?(Eigen::Vector3f::UnitY ()):(scene_reference_normal.cross (Eigen::Vector3f::UnitX ()). normalized());
- Eigen::AngleAxisf rotation_sg (rotation_angle_sg, rotation_axis_sg);
- Eigen::Affine3f transform_sg (Eigen::Translation3f ( rotation_sg * ((-1) * scene_reference_point)) * rotation_sg);
+ for (index_t scene_reference_index = 0;
+ scene_reference_index < static_cast<index_t>(target_->size());
+ scene_reference_index += scene_reference_point_sampling_rate_) {
+ Eigen::Vector3f scene_reference_point =
+ (*target_)[scene_reference_index].getVector3fMap(),
+ scene_reference_normal =
+ (*target_)[scene_reference_index].getNormalVector3fMap();
+
+ float rotation_angle_sg =
+ std::acos(scene_reference_normal.dot(Eigen::Vector3f::UnitX()));
+ bool parallel_to_x_sg =
+ (scene_reference_normal.y() == 0.0f && scene_reference_normal.z() == 0.0f);
+ Eigen::Vector3f rotation_axis_sg =
+ (parallel_to_x_sg)
+ ? (Eigen::Vector3f::UnitY())
+ : (scene_reference_normal.cross(Eigen::Vector3f::UnitX()).normalized());
+ Eigen::AngleAxisf rotation_sg(rotation_angle_sg, rotation_axis_sg);
+ Eigen::Affine3f transform_sg(
+ Eigen::Translation3f(rotation_sg * ((-1) * scene_reference_point)) *
+ rotation_sg);
// For every other point in the scene => now have pair (s_r, s_i) fixed
- std::vector<int> indices;
+ pcl::Indices indices;
std::vector<float> distances;
- scene_search_tree_->radiusSearch ((*target_)[scene_reference_index],
- search_method_->getModelDiameter () /2,
+ scene_search_tree_->radiusSearch((*target_)[scene_reference_index],
+ search_method_->getModelDiameter() / 2,
indices,
distances);
- for(const std::size_t &scene_point_index : indices)
-// for(std::size_t i = 0; i < target_->size (); ++i)
+ for (const auto& scene_point_index : indices)
+ // for(std::size_t i = 0; i < target_->size (); ++i)
{
- //size_t scene_point_index = i;
- if (scene_reference_index != scene_point_index)
- {
- if (/*pcl::computePPFPairFeature*/pcl::computePairFeatures ((*target_)[scene_reference_index].getVector4fMap (),
- (*target_)[scene_reference_index].getNormalVector4fMap (),
- (*target_)[scene_point_index].getVector4fMap (),
- (*target_)[scene_point_index].getNormalVector4fMap (),
- f1, f2, f3, f4))
- {
- std::vector<std::pair<std::size_t, std::size_t> > nearest_indices;
- search_method_->nearestNeighborSearch (f1, f2, f3, f4, nearest_indices);
+ // size_t scene_point_index = i;
+ if (scene_reference_index != scene_point_index) {
+ if (/*pcl::computePPFPairFeature*/ pcl::computePairFeatures(
+ (*target_)[scene_reference_index].getVector4fMap(),
+ (*target_)[scene_reference_index].getNormalVector4fMap(),
+ (*target_)[scene_point_index].getVector4fMap(),
+ (*target_)[scene_point_index].getNormalVector4fMap(),
+ f1,
+ f2,
+ f3,
+ f4)) {
+ std::vector<std::pair<std::size_t, std::size_t>> nearest_indices;
+ search_method_->nearestNeighborSearch(f1, f2, f3, f4, nearest_indices);
// Compute alpha_s angle
- Eigen::Vector3f scene_point = (*target_)[scene_point_index].getVector3fMap ();
+ Eigen::Vector3f scene_point = (*target_)[scene_point_index].getVector3fMap();
Eigen::Vector3f scene_point_transformed = transform_sg * scene_point;
- float alpha_s = std::atan2 ( -scene_point_transformed(2), scene_point_transformed(1));
- if (std::sin (alpha_s) * scene_point_transformed(2) < 0.0f)
+ float alpha_s =
+ std::atan2(-scene_point_transformed(2), scene_point_transformed(1));
+ if (std::sin(alpha_s) * scene_point_transformed(2) < 0.0f)
alpha_s *= (-1);
alpha_s *= (-1);
// Go through point pairs in the model with the same discretized feature
- for (const auto &nearest_index : nearest_indices)
- {
+ for (const auto& nearest_index : nearest_indices) {
std::size_t model_reference_index = nearest_index.first;
std::size_t model_point_index = nearest_index.second;
// Calculate angle alpha = alpha_m - alpha_s
- float alpha = search_method_->alpha_m_[model_reference_index][model_point_index] - alpha_s;
- unsigned int alpha_discretized = static_cast<unsigned int> (std::floor (alpha) + std::floor (M_PI / search_method_->getAngleDiscretizationStep ()));
- accumulator_array[model_reference_index][alpha_discretized] ++;
+ float alpha =
+ search_method_->alpha_m_[model_reference_index][model_point_index] -
+ alpha_s;
+ unsigned int alpha_discretized = static_cast<unsigned int>(
+ std::floor(alpha) +
+ std::floor(M_PI / search_method_->getAngleDiscretizationStep()));
+ accumulator_array[model_reference_index][alpha_discretized]++;
}
}
- else PCL_ERROR ("[pcl::PPFRegistration::computeTransformation] Computing pair feature vector between points %u and %u went wrong.\n", scene_reference_index, scene_point_index);
+ else
+ PCL_ERROR("[pcl::PPFRegistration::computeTransformation] Computing pair "
+ "feature vector between points %u and %u went wrong.\n",
+ scene_reference_index,
+ scene_point_index);
}
}
std::size_t max_votes_i = 0, max_votes_j = 0;
unsigned int max_votes = 0;
- for (std::size_t i = 0; i < accumulator_array.size (); ++i)
- for (std::size_t j = 0; j < accumulator_array.back ().size (); ++j)
- {
- if (accumulator_array[i][j] > max_votes)
- {
+ for (std::size_t i = 0; i < accumulator_array.size(); ++i)
+ for (std::size_t j = 0; j < accumulator_array.back().size(); ++j) {
+ if (accumulator_array[i][j] > max_votes) {
max_votes = accumulator_array[i][j];
max_votes_i = i;
max_votes_j = j;
}
- // Reset accumulator_array for the next set of iterations with a new scene reference point
+ // Reset accumulator_array for the next set of iterations with a new scene
+ // reference point
accumulator_array[i][j] = 0;
}
- Eigen::Vector3f model_reference_point = (*input_)[max_votes_i].getVector3fMap (),
- model_reference_normal = (*input_)[max_votes_i].getNormalVector3fMap ();
- float rotation_angle_mg = std::acos (model_reference_normal.dot (Eigen::Vector3f::UnitX ()));
- bool parallel_to_x_mg = (model_reference_normal.y() == 0.0f && model_reference_normal.z() == 0.0f);
- Eigen::Vector3f rotation_axis_mg = (parallel_to_x_mg)?(Eigen::Vector3f::UnitY ()):(model_reference_normal.cross (Eigen::Vector3f::UnitX ()). normalized());
- Eigen::AngleAxisf rotation_mg (rotation_angle_mg, rotation_axis_mg);
- Eigen::Affine3f transform_mg (Eigen::Translation3f ( rotation_mg * ((-1) * model_reference_point)) * rotation_mg);
- Eigen::Affine3f max_transform =
- transform_sg.inverse () *
- Eigen::AngleAxisf ((static_cast<float> (max_votes_j) - std::floor (static_cast<float> (M_PI) / search_method_->getAngleDiscretizationStep ())) * search_method_->getAngleDiscretizationStep (), Eigen::Vector3f::UnitX ()) *
- transform_mg;
-
- voted_poses.push_back (PoseWithVotes (max_transform, max_votes));
+ Eigen::Vector3f model_reference_point = (*input_)[max_votes_i].getVector3fMap(),
+ model_reference_normal =
+ (*input_)[max_votes_i].getNormalVector3fMap();
+ float rotation_angle_mg =
+ std::acos(model_reference_normal.dot(Eigen::Vector3f::UnitX()));
+ bool parallel_to_x_mg =
+ (model_reference_normal.y() == 0.0f && model_reference_normal.z() == 0.0f);
+ Eigen::Vector3f rotation_axis_mg =
+ (parallel_to_x_mg)
+ ? (Eigen::Vector3f::UnitY())
+ : (model_reference_normal.cross(Eigen::Vector3f::UnitX()).normalized());
+ Eigen::AngleAxisf rotation_mg(rotation_angle_mg, rotation_axis_mg);
+ Eigen::Affine3f transform_mg(
+ Eigen::Translation3f(rotation_mg * ((-1) * model_reference_point)) *
+ rotation_mg);
+ Eigen::Affine3f max_transform =
+ transform_sg.inverse() *
+ Eigen::AngleAxisf((static_cast<float>(max_votes_j) -
+ std::floor(static_cast<float>(M_PI) /
+ search_method_->getAngleDiscretizationStep())) *
+ search_method_->getAngleDiscretizationStep(),
+ Eigen::Vector3f::UnitX()) *
+ transform_mg;
+
+ voted_poses.push_back(PoseWithVotes(max_transform, max_votes));
}
- PCL_DEBUG ("Done with the Hough Transform ...\n");
+ PCL_DEBUG("Done with the Hough Transform ...\n");
// Cluster poses for filtering out outliers and obtaining more precise results
PoseWithVotesList results;
- clusterPoses (voted_poses, results);
+ clusterPoses(voted_poses, results);
- pcl::transformPointCloud (*input_, output, results.front ().pose);
+ pcl::transformPointCloud(*input_, output, results.front().pose);
- transformation_ = final_transformation_ = results.front ().pose.matrix ();
+ transformation_ = final_transformation_ = results.front().pose.matrix();
converged_ = true;
}
-
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget> void
-pcl::PPFRegistration<PointSource, PointTarget>::clusterPoses (typename pcl::PPFRegistration<PointSource, PointTarget>::PoseWithVotesList &poses,
- typename pcl::PPFRegistration<PointSource, PointTarget>::PoseWithVotesList &result)
+template <typename PointSource, typename PointTarget>
+void
+pcl::PPFRegistration<PointSource, PointTarget>::clusterPoses(
+ typename pcl::PPFRegistration<PointSource, PointTarget>::PoseWithVotesList& poses,
+ typename pcl::PPFRegistration<PointSource, PointTarget>::PoseWithVotesList& result)
{
- PCL_INFO ("Clustering poses ...\n");
+ PCL_INFO("Clustering poses ...\n");
// Start off by sorting the poses by the number of votes
- sort(poses.begin (), poses.end (), poseWithVotesCompareFunction);
+ sort(poses.begin(), poses.end(), poseWithVotesCompareFunction);
std::vector<PoseWithVotesList> clusters;
- std::vector<std::pair<std::size_t, unsigned int> > cluster_votes;
- for (std::size_t poses_i = 0; poses_i < poses.size(); ++ poses_i)
- {
+ std::vector<std::pair<std::size_t, unsigned int>> cluster_votes;
+ for (std::size_t poses_i = 0; poses_i < poses.size(); ++poses_i) {
bool found_cluster = false;
- for (std::size_t clusters_i = 0; clusters_i < clusters.size(); ++ clusters_i)
- {
- if (posesWithinErrorBounds (poses[poses_i].pose, clusters[clusters_i].front ().pose))
- {
+ for (std::size_t clusters_i = 0; clusters_i < clusters.size(); ++clusters_i) {
+ if (posesWithinErrorBounds(poses[poses_i].pose,
+ clusters[clusters_i].front().pose)) {
found_cluster = true;
- clusters[clusters_i].push_back (poses[poses_i]);
+ clusters[clusters_i].push_back(poses[poses_i]);
cluster_votes[clusters_i].second += poses[poses_i].votes;
break;
}
}
- if (!found_cluster)
- {
+ if (!found_cluster) {
// Create a new cluster with the current pose
PoseWithVotesList new_cluster;
- new_cluster.push_back (poses[poses_i]);
- clusters.push_back (new_cluster);
- cluster_votes.push_back (std::pair<std::size_t, unsigned int> (clusters.size () - 1, poses[poses_i].votes));
+ new_cluster.push_back(poses[poses_i]);
+ clusters.push_back(new_cluster);
+ cluster_votes.push_back(std::pair<std::size_t, unsigned int>(
+ clusters.size() - 1, poses[poses_i].votes));
}
- }
+ }
// Sort clusters by total number of votes
- std::sort (cluster_votes.begin (), cluster_votes.end (), clusterVotesCompareFunction);
+ std::sort(cluster_votes.begin(), cluster_votes.end(), clusterVotesCompareFunction);
// Compute pose average and put them in result vector
- /// @todo some kind of threshold for determining whether a cluster has enough votes or not...
- /// now just taking the first three clusters
- result.clear ();
- std::size_t max_clusters = (clusters.size () < 3) ? clusters.size () : 3;
- for (std::size_t cluster_i = 0; cluster_i < max_clusters; ++ cluster_i)
- {
- PCL_INFO ("Winning cluster has #votes: %d and #poses voted: %d.\n", cluster_votes[cluster_i].second, clusters[cluster_votes[cluster_i].first].size ());
- Eigen::Vector3f translation_average (0.0, 0.0, 0.0);
- Eigen::Vector4f rotation_average (0.0, 0.0, 0.0, 0.0);
- for (typename PoseWithVotesList::iterator v_it = clusters[cluster_votes[cluster_i].first].begin (); v_it != clusters[cluster_votes[cluster_i].first].end (); ++ v_it)
- {
- translation_average += v_it->pose.translation ();
- /// averaging rotations by just averaging the quaternions in 4D space - reference "On Averaging Rotations" by CLAUS GRAMKOW
- rotation_average += Eigen::Quaternionf (v_it->pose.rotation ()).coeffs ();
+ /// @todo some kind of threshold for determining whether a cluster has enough votes or
+ /// not... now just taking the first three clusters
+ result.clear();
+ std::size_t max_clusters = (clusters.size() < 3) ? clusters.size() : 3;
+ for (std::size_t cluster_i = 0; cluster_i < max_clusters; ++cluster_i) {
+ PCL_INFO("Winning cluster has #votes: %d and #poses voted: %d.\n",
+ cluster_votes[cluster_i].second,
+ clusters[cluster_votes[cluster_i].first].size());
+ Eigen::Vector3f translation_average(0.0, 0.0, 0.0);
+ Eigen::Vector4f rotation_average(0.0, 0.0, 0.0, 0.0);
+ for (typename PoseWithVotesList::iterator v_it =
+ clusters[cluster_votes[cluster_i].first].begin();
+ v_it != clusters[cluster_votes[cluster_i].first].end();
+ ++v_it) {
+ translation_average += v_it->pose.translation();
+ /// averaging rotations by just averaging the quaternions in 4D space - reference
+ /// "On Averaging Rotations" by CLAUS GRAMKOW
+ rotation_average += Eigen::Quaternionf(v_it->pose.rotation()).coeffs();
}
- translation_average /= static_cast<float> (clusters[cluster_votes[cluster_i].first].size ());
- rotation_average /= static_cast<float> (clusters[cluster_votes[cluster_i].first].size ());
+ translation_average /=
+ static_cast<float>(clusters[cluster_votes[cluster_i].first].size());
+ rotation_average /=
+ static_cast<float>(clusters[cluster_votes[cluster_i].first].size());
Eigen::Affine3f transform_average;
- transform_average.translation ().matrix () = translation_average;
- transform_average.linear ().matrix () = Eigen::Quaternionf (rotation_average).normalized().toRotationMatrix ();
+ transform_average.translation().matrix() = translation_average;
+ transform_average.linear().matrix() =
+ Eigen::Quaternionf(rotation_average).normalized().toRotationMatrix();
- result.push_back (PoseWithVotes (transform_average, cluster_votes[cluster_i].second));
+ result.push_back(PoseWithVotes(transform_average, cluster_votes[cluster_i].second));
}
}
-
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget> bool
-pcl::PPFRegistration<PointSource, PointTarget>::posesWithinErrorBounds (Eigen::Affine3f &pose1,
- Eigen::Affine3f &pose2)
+template <typename PointSource, typename PointTarget>
+bool
+pcl::PPFRegistration<PointSource, PointTarget>::posesWithinErrorBounds(
+ Eigen::Affine3f& pose1, Eigen::Affine3f& pose2)
{
- float position_diff = (pose1.translation () - pose2.translation ()).norm ();
- Eigen::AngleAxisf rotation_diff_mat ((pose1.rotation ().inverse ().lazyProduct (pose2.rotation ()).eval()));
+ float position_diff = (pose1.translation() - pose2.translation()).norm();
+ Eigen::AngleAxisf rotation_diff_mat(
+ (pose1.rotation().inverse().lazyProduct(pose2.rotation()).eval()));
- float rotation_diff_angle = std::abs (rotation_diff_mat.angle ());
+ float rotation_diff_angle = std::abs(rotation_diff_mat.angle());
- return (position_diff < clustering_position_diff_threshold_ && rotation_diff_angle < clustering_rotation_diff_threshold_);
+ return (position_diff < clustering_position_diff_threshold_ &&
+ rotation_diff_angle < clustering_rotation_diff_threshold_);
}
-
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget> bool
-pcl::PPFRegistration<PointSource, PointTarget>::poseWithVotesCompareFunction (const typename pcl::PPFRegistration<PointSource, PointTarget>::PoseWithVotes &a,
- const typename pcl::PPFRegistration<PointSource, PointTarget>::PoseWithVotes &b )
+template <typename PointSource, typename PointTarget>
+bool
+pcl::PPFRegistration<PointSource, PointTarget>::poseWithVotesCompareFunction(
+ const typename pcl::PPFRegistration<PointSource, PointTarget>::PoseWithVotes& a,
+ const typename pcl::PPFRegistration<PointSource, PointTarget>::PoseWithVotes& b)
{
return (a.votes > b.votes);
}
-
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget> bool
-pcl::PPFRegistration<PointSource, PointTarget>::clusterVotesCompareFunction (const std::pair<std::size_t, unsigned int> &a,
- const std::pair<std::size_t, unsigned int> &b)
+template <typename PointSource, typename PointTarget>
+bool
+pcl::PPFRegistration<PointSource, PointTarget>::clusterVotesCompareFunction(
+ const std::pair<std::size_t, unsigned int>& a,
+ const std::pair<std::size_t, unsigned int>& b)
{
return (a.second > b.second);
}
-//#define PCL_INSTANTIATE_PPFRegistration(PointSource,PointTarget) template class PCL_EXPORTS pcl::PPFRegistration<PointSource, PointTarget>;
+//#define PCL_INSTANTIATE_PPFRegistration(PointSource,PointTarget) template class
+// PCL_EXPORTS pcl::PPFRegistration<PointSource, PointTarget>;
#endif // PCL_REGISTRATION_IMPL_PPF_REGISTRATION_H_
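Editorial note: clusterPoses above averages the rotations of a pose cluster by summing quaternion coefficients and re-normalizing, following Gramkow's "On Averaging Rotations". A small self-contained sketch of that step (illustrative only; it assumes all quaternions of a cluster lie in the same hemisphere, which holds for tightly clustered poses):

// Average a set of rotations by coefficient-wise quaternion averaging.
#include <Eigen/Geometry>
#include <vector>

inline Eigen::Quaternionf
averageRotations(const std::vector<Eigen::Quaternionf>& rotations)
{
  Eigen::Vector4f sum = Eigen::Vector4f::Zero();
  for (const auto& q : rotations)
    sum += q.coeffs();                           // (x, y, z, w) coefficient-wise sum
  sum /= static_cast<float>(rotations.size());
  return Eigen::Quaternionf(sum).normalized();   // renormalize to a unit quaternion
}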
#ifndef PCL_REGISTRATION_IMPL_PYRAMID_FEATURE_MATCHING_H_
#define PCL_REGISTRATION_IMPL_PYRAMID_FEATURE_MATCHING_H_
-#include <pcl/pcl_macros.h>
+#include <pcl/common/point_tests.h> // for pcl::isFinite
#include <pcl/console/print.h>
+#include <pcl/pcl_macros.h>
+namespace pcl {
-namespace pcl
-{
-
-template <typename PointFeature> float
-PyramidFeatureHistogram<PointFeature>::comparePyramidFeatureHistograms (const PyramidFeatureHistogramPtr &pyramid_a,
- const PyramidFeatureHistogramPtr &pyramid_b)
+template <typename PointFeature>
+float
+PyramidFeatureHistogram<PointFeature>::comparePyramidFeatureHistograms(
+ const PyramidFeatureHistogramPtr& pyramid_a,
+ const PyramidFeatureHistogramPtr& pyramid_b)
{
// do a few consistency checks before and during the computation
- if (pyramid_a->nr_dimensions != pyramid_b->nr_dimensions)
- {
- PCL_ERROR ("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] The two given pyramids have different numbers of dimensions: %u vs %u\n", pyramid_a->nr_dimensions, pyramid_b->nr_dimensions);
+ if (pyramid_a->nr_dimensions != pyramid_b->nr_dimensions) {
+ PCL_ERROR("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] The two "
+ "given pyramids have different numbers of dimensions: %u vs %u\n",
+ pyramid_a->nr_dimensions,
+ pyramid_b->nr_dimensions);
return -1;
}
- if (pyramid_a->nr_levels != pyramid_b->nr_levels)
- {
- PCL_ERROR ("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] The two given pyramids have different numbers of levels: %u vs %u\n", pyramid_a->nr_levels, pyramid_b->nr_levels);
+ if (pyramid_a->nr_levels != pyramid_b->nr_levels) {
+ PCL_ERROR("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] The two "
+ "given pyramids have different numbers of levels: %u vs %u\n",
+ pyramid_a->nr_levels,
+ pyramid_b->nr_levels);
return -1;
}
-
// calculate for level 0 first
- if (pyramid_a->hist_levels[0].hist.size () != pyramid_b->hist_levels[0].hist.size ())
- {
- PCL_ERROR ("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] The two given pyramids have different numbers of bins on level 0: %u vs %u\n", pyramid_a->hist_levels[0].hist.size (), pyramid_b->hist_levels[0].hist.size ());
+ if (pyramid_a->hist_levels[0].hist.size() != pyramid_b->hist_levels[0].hist.size()) {
+ PCL_ERROR("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] The two "
+ "given pyramids have different numbers of bins on level 0: %u vs %u\n",
+ pyramid_a->hist_levels[0].hist.size(),
+ pyramid_b->hist_levels[0].hist.size());
return -1;
}
float match_count_level = 0.0f;
- for (std::size_t bin_i = 0; bin_i < pyramid_a->hist_levels[0].hist.size (); ++bin_i)
- {
+ for (std::size_t bin_i = 0; bin_i < pyramid_a->hist_levels[0].hist.size(); ++bin_i) {
if (pyramid_a->hist_levels[0].hist[bin_i] < pyramid_b->hist_levels[0].hist[bin_i])
- match_count_level += static_cast<float> (pyramid_a->hist_levels[0].hist[bin_i]);
+ match_count_level += static_cast<float>(pyramid_a->hist_levels[0].hist[bin_i]);
else
- match_count_level += static_cast<float> (pyramid_b->hist_levels[0].hist[bin_i]);
+ match_count_level += static_cast<float>(pyramid_b->hist_levels[0].hist[bin_i]);
}
-
float match_count = match_count_level;
- for (std::size_t level_i = 1; level_i < pyramid_a->nr_levels; ++level_i)
- {
- if (pyramid_a->hist_levels[level_i].hist.size () != pyramid_b->hist_levels[level_i].hist.size ())
- {
- PCL_ERROR ("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] The two given pyramids have different numbers of bins on level %u: %u vs %u\n", level_i, pyramid_a->hist_levels[level_i].hist.size (), pyramid_b->hist_levels[level_i].hist.size ());
+ for (std::size_t level_i = 1; level_i < pyramid_a->nr_levels; ++level_i) {
+ if (pyramid_a->hist_levels[level_i].hist.size() !=
+ pyramid_b->hist_levels[level_i].hist.size()) {
+ PCL_ERROR(
+ "[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] The two "
+ "given pyramids have different numbers of bins on level %u: %u vs %u\n",
+ level_i,
+ pyramid_a->hist_levels[level_i].hist.size(),
+ pyramid_b->hist_levels[level_i].hist.size());
return -1;
}
float match_count_prev_level = match_count_level;
match_count_level = 0.0f;
- for (std::size_t bin_i = 0; bin_i < pyramid_a->hist_levels[level_i].hist.size (); ++bin_i)
- {
- if (pyramid_a->hist_levels[level_i].hist[bin_i] < pyramid_b->hist_levels[level_i].hist[bin_i])
- match_count_level += static_cast<float> (pyramid_a->hist_levels[level_i].hist[bin_i]);
+ for (std::size_t bin_i = 0; bin_i < pyramid_a->hist_levels[level_i].hist.size();
+ ++bin_i) {
+ if (pyramid_a->hist_levels[level_i].hist[bin_i] <
+ pyramid_b->hist_levels[level_i].hist[bin_i])
+ match_count_level +=
+ static_cast<float>(pyramid_a->hist_levels[level_i].hist[bin_i]);
else
- match_count_level += static_cast<float> (pyramid_b->hist_levels[level_i].hist[bin_i]);
+ match_count_level +=
+ static_cast<float>(pyramid_b->hist_levels[level_i].hist[bin_i]);
}
- float level_normalization_factor = powf (2.0f, static_cast<float> (level_i));
- match_count += (match_count_level - match_count_prev_level) / level_normalization_factor;
+ float level_normalization_factor = powf(2.0f, static_cast<float>(level_i));
+ match_count +=
+ (match_count_level - match_count_prev_level) / level_normalization_factor;
}
-
// include self-similarity factors
- float self_similarity_a = static_cast<float> (pyramid_a->nr_features),
- self_similarity_b = static_cast<float> (pyramid_b->nr_features);
- PCL_DEBUG ("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] Self similarity measures: %f, %f\n", self_similarity_a, self_similarity_b);
- match_count /= std::sqrt (self_similarity_a * self_similarity_b);
+ float self_similarity_a = static_cast<float>(pyramid_a->nr_features),
+ self_similarity_b = static_cast<float>(pyramid_b->nr_features);
+ PCL_DEBUG("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] Self "
+ "similarity measures: %f, %f\n",
+ self_similarity_a,
+ self_similarity_b);
+ match_count /= std::sqrt(self_similarity_a * self_similarity_b);
return match_count;
}
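Editorial note: comparePyramidFeatureHistograms above computes a pyramid match score: bin-wise minima are summed per level, only the matches newly gained at each coarser level contribute (weighted by 1/2^level), and the total is normalized by the geometric mean of the two feature counts. A stand-alone sketch over plain vectors, not the PCL API:

// Pyramid match score for two multi-level histograms (illustrative names only).
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

inline float
pyramidMatchScore(const std::vector<std::vector<unsigned int>>& hist_a,
                  const std::vector<std::vector<unsigned int>>& hist_b,
                  float nr_features_a, float nr_features_b)
{
  float match_prev = 0.0f, score = 0.0f;
  for (std::size_t level = 0; level < hist_a.size(); ++level) {
    float match = 0.0f;
    for (std::size_t bin = 0; bin < hist_a[level].size(); ++bin)
      match += static_cast<float>(std::min(hist_a[level][bin], hist_b[level][bin]));
    // Level 0 counts fully; coarser levels only contribute the matches gained there.
    score += (level == 0)
                 ? match
                 : (match - match_prev) / std::pow(2.0f, static_cast<float>(level));
    match_prev = match;
  }
  return score / std::sqrt(nr_features_a * nr_features_b);
}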
-
template <typename PointFeature>
-PyramidFeatureHistogram<PointFeature>::PyramidFeatureHistogram () :
- nr_dimensions (0), nr_levels (0), nr_features (0),
- feature_representation_ (new DefaultPointRepresentation<PointFeature>),
- is_computed_ (false),
- hist_levels ()
-{
-}
-
+PyramidFeatureHistogram<PointFeature>::PyramidFeatureHistogram()
+: nr_dimensions(0)
+, nr_levels(0)
+, nr_features(0)
+, feature_representation_(new DefaultPointRepresentation<PointFeature>)
+, is_computed_(false)
+, hist_levels()
+{}
-template <typename PointFeature> void
-PyramidFeatureHistogram<PointFeature>::PyramidFeatureHistogramLevel::initializeHistogramLevel ()
+template <typename PointFeature>
+void
+PyramidFeatureHistogram<
+ PointFeature>::PyramidFeatureHistogramLevel::initializeHistogramLevel()
{
std::size_t total_vector_size = 1;
- for (std::vector<std::size_t>::iterator dim_it = bins_per_dimension.begin (); dim_it != bins_per_dimension.end (); ++dim_it)
+ for (std::vector<std::size_t>::iterator dim_it = bins_per_dimension.begin();
+ dim_it != bins_per_dimension.end();
+ ++dim_it)
total_vector_size *= *dim_it;
- hist.resize (total_vector_size, 0);
+ hist.resize(total_vector_size, 0);
}
-
-template <typename PointFeature> bool
-PyramidFeatureHistogram<PointFeature>::initializeHistogram ()
+template <typename PointFeature>
+bool
+PyramidFeatureHistogram<PointFeature>::initializeHistogram()
{
// a few consistency checks before starting the computations
- if (!PCLBase<PointFeature>::initCompute ())
- {
- PCL_ERROR ("[pcl::PyramidFeatureHistogram::initializeHistogram] PCLBase initCompute failed\n");
+ if (!PCLBase<PointFeature>::initCompute()) {
+ PCL_ERROR("[pcl::PyramidFeatureHistogram::initializeHistogram] PCLBase initCompute "
+ "failed\n");
return false;
}
- if (dimension_range_input_.empty ())
- {
- PCL_ERROR ("[pcl::PyramidFeatureHistogram::initializeHistogram] Input dimension range was not set\n");
+ if (dimension_range_input_.empty()) {
+ PCL_ERROR("[pcl::PyramidFeatureHistogram::initializeHistogram] Input dimension "
+ "range was not set\n");
return false;
}
- if (dimension_range_target_.empty ())
- {
- PCL_ERROR ("[pcl::PyramidFeatureHistogram::initializeHistogram] Target dimension range was not set\n");
+ if (dimension_range_target_.empty()) {
+ PCL_ERROR("[pcl::PyramidFeatureHistogram::initializeHistogram] Target dimension "
+ "range was not set\n");
return false;
}
- if (dimension_range_input_.size () != dimension_range_target_.size ())
- {
- PCL_ERROR ("[pcl::PyramidFeatureHistogram::initializeHistogram] Input and target dimension ranges do not agree in size: %u vs %u\n",
- dimension_range_input_.size (), dimension_range_target_.size ());
+ if (dimension_range_input_.size() != dimension_range_target_.size()) {
+ PCL_ERROR("[pcl::PyramidFeatureHistogram::initializeHistogram] Input and target "
+ "dimension ranges do not agree in size: %u vs %u\n",
+ dimension_range_input_.size(),
+ dimension_range_target_.size());
return false;
}
-
- nr_dimensions = dimension_range_target_.size ();
- nr_features = input_->size ();
+ nr_dimensions = dimension_range_target_.size();
+ nr_features = input_->size();
float D = 0.0f;
- for (std::vector<std::pair<float, float> >::iterator range_it = dimension_range_target_.begin (); range_it != dimension_range_target_.end (); ++range_it)
- {
+ for (std::vector<std::pair<float, float>>::iterator range_it =
+ dimension_range_target_.begin();
+ range_it != dimension_range_target_.end();
+ ++range_it) {
float aux = range_it->first - range_it->second;
D += aux * aux;
}
- D = std::sqrt (D);
- nr_levels = static_cast<std::size_t> (std::ceil (std::log2(D)));
- PCL_DEBUG ("[pcl::PyramidFeatureHistogram::initializeHistogram] Pyramid will have %u levels with a hyper-parallelepiped diagonal size of %f\n", nr_levels, D);
-
-
- hist_levels.resize (nr_levels);
- for (std::size_t level_i = 0; level_i < nr_levels; ++level_i)
- {
- std::vector<std::size_t> bins_per_dimension (nr_dimensions);
- std::vector<float> bin_step (nr_dimensions);
- for (std::size_t dim_i = 0; dim_i < nr_dimensions; ++dim_i)
- {
- bins_per_dimension[dim_i] =
- static_cast<std::size_t> (std::ceil ((dimension_range_target_[dim_i].second - dimension_range_target_[dim_i].first) / (powf (2.0f, static_cast<float> (level_i)) * std::sqrt (static_cast<float> (nr_dimensions)))));
- bin_step[dim_i] = powf (2.0f, static_cast<float> (level_i)) * std::sqrt (static_cast<float> (nr_dimensions));
+ D = std::sqrt(D);
+ nr_levels = static_cast<std::size_t>(std::ceil(std::log2(D)));
+ PCL_DEBUG("[pcl::PyramidFeatureHistogram::initializeHistogram] Pyramid will have %u "
+ "levels with a hyper-parallelepiped diagonal size of %f\n",
+ nr_levels,
+ D);
+
+ hist_levels.resize(nr_levels);
+ for (std::size_t level_i = 0; level_i < nr_levels; ++level_i) {
+ std::vector<std::size_t> bins_per_dimension(nr_dimensions);
+ std::vector<float> bin_step(nr_dimensions);
+ for (std::size_t dim_i = 0; dim_i < nr_dimensions; ++dim_i) {
+ bins_per_dimension[dim_i] = static_cast<std::size_t>(
+ std::ceil((dimension_range_target_[dim_i].second -
+ dimension_range_target_[dim_i].first) /
+ (powf(2.0f, static_cast<float>(level_i)) *
+ std::sqrt(static_cast<float>(nr_dimensions)))));
+ bin_step[dim_i] = powf(2.0f, static_cast<float>(level_i)) *
+ std::sqrt(static_cast<float>(nr_dimensions));
}
- hist_levels[level_i] = PyramidFeatureHistogramLevel (bins_per_dimension, bin_step);
+ hist_levels[level_i] = PyramidFeatureHistogramLevel(bins_per_dimension, bin_step);
- PCL_DEBUG ("[pcl::PyramidFeatureHistogram::initializeHistogram] Created vector of size %u at level %u\nwith #bins per dimension:", hist_levels.back ().hist.size (), level_i);
+ PCL_DEBUG("[pcl::PyramidFeatureHistogram::initializeHistogram] Created vector of "
+ "size %u at level %u\nwith #bins per dimension:",
+ hist_levels.back().hist.size(),
+ level_i);
for (std::size_t dim_i = 0; dim_i < nr_dimensions; ++dim_i)
- PCL_DEBUG ("%u ", bins_per_dimension[dim_i]);
- PCL_DEBUG ("\n");
+ PCL_DEBUG("%u ", bins_per_dimension[dim_i]);
+ PCL_DEBUG("\n");
}
return true;
}
-
-template <typename PointFeature> unsigned int&
-PyramidFeatureHistogram<PointFeature>::at (std::vector<std::size_t> &access,
- std::size_t &level)
+template <typename PointFeature>
+unsigned int&
+PyramidFeatureHistogram<PointFeature>::at(std::vector<std::size_t>& access,
+ std::size_t& level)
{
- if (access.size () != nr_dimensions)
- {
- PCL_ERROR ("[pcl::PyramidFeatureHistogram::at] Cannot access histogram position because the access point does not have the right number of dimensions\n");
- return hist_levels.front ().hist.front ();
+ if (access.size() != nr_dimensions) {
+ PCL_ERROR(
+ "[pcl::PyramidFeatureHistogram::at] Cannot access histogram position because "
+ "the access point does not have the right number of dimensions\n");
+ return hist_levels.front().hist.front();
}
- if (level >= hist_levels.size ())
- {
- PCL_ERROR ("[pcl::PyramidFeatureHistogram::at] Trying to access a too large level\n");
- return hist_levels.front ().hist.front ();
+ if (level >= hist_levels.size()) {
+ PCL_ERROR(
+ "[pcl::PyramidFeatureHistogram::at] Trying to access a too large level\n");
+ return hist_levels.front().hist.front();
}
std::size_t vector_position = 0;
std::size_t dim_accumulator = 1;
- for (int i = static_cast<int> (access.size ()) - 1; i >= 0; --i)
- {
+ for (int i = static_cast<int>(access.size()) - 1; i >= 0; --i) {
vector_position += access[i] * dim_accumulator;
dim_accumulator *= hist_levels[level].bins_per_dimension[i];
}
return hist_levels[level].hist[vector_position];
}
-
-template <typename PointFeature> unsigned int&
-PyramidFeatureHistogram<PointFeature>::at (std::vector<float> &feature,
- std::size_t &level)
+template <typename PointFeature>
+unsigned int&
+PyramidFeatureHistogram<PointFeature>::at(std::vector<float>& feature,
+ std::size_t& level)
{
- if (feature.size () != nr_dimensions)
- {
- PCL_ERROR ("[pcl::PyramidFeatureHistogram::at] The given feature vector does not match the feature dimensions of the pyramid histogram: %u vs %u\n", feature.size (), nr_dimensions);
- return hist_levels.front ().hist.front ();
+ if (feature.size() != nr_dimensions) {
+ PCL_ERROR("[pcl::PyramidFeatureHistogram::at] The given feature vector does not "
+ "match the feature dimensions of the pyramid histogram: %u vs %u\n",
+ feature.size(),
+ nr_dimensions);
+ return hist_levels.front().hist.front();
}
- if (level >= hist_levels.size ())
- {
- PCL_ERROR ("[pcl::PyramidFeatureHistogram::at] Trying to access a too large level\n");
- return hist_levels.front ().hist.front ();
+ if (level >= hist_levels.size()) {
+ PCL_ERROR(
+ "[pcl::PyramidFeatureHistogram::at] Trying to access a too large level\n");
+ return hist_levels.front().hist.front();
}
std::vector<std::size_t> access;
for (std::size_t dim_i = 0; dim_i < nr_dimensions; ++dim_i)
- access.push_back (static_cast<std::size_t> (std::floor ((feature[dim_i] - dimension_range_target_[dim_i].first) / hist_levels[level].bin_step[dim_i])));
+ access.push_back(static_cast<std::size_t>(
+ std::floor((feature[dim_i] - dimension_range_target_[dim_i].first) /
+ hist_levels[level].bin_step[dim_i])));
- return at (access, level);
+ return at(access, level);
}
-
-template <typename PointFeature> void
-PyramidFeatureHistogram<PointFeature>::convertFeatureToVector (const PointFeature &feature,
- std::vector<float> &feature_vector)
+template <typename PointFeature>
+void
+PyramidFeatureHistogram<PointFeature>::convertFeatureToVector(
+ const PointFeature& feature, std::vector<float>& feature_vector)
{
// convert feature to vector representation
- feature_vector.resize (feature_representation_->getNumberOfDimensions ());
- feature_representation_->vectorize (feature, feature_vector);
+ feature_vector.resize(feature_representation_->getNumberOfDimensions());
+ feature_representation_->vectorize(feature, feature_vector);
// adapt the values from the input range to the target range
- for (std::size_t i = 0; i < feature_vector.size (); ++i)
- feature_vector[i] = (feature_vector[i] - dimension_range_input_[i].first) / (dimension_range_input_[i].second - dimension_range_input_[i].first) *
- (dimension_range_target_[i].second - dimension_range_target_[i].first) + dimension_range_target_[i].first;
+ for (std::size_t i = 0; i < feature_vector.size(); ++i)
+ feature_vector[i] =
+ (feature_vector[i] - dimension_range_input_[i].first) /
+ (dimension_range_input_[i].second - dimension_range_input_[i].first) *
+ (dimension_range_target_[i].second - dimension_range_target_[i].first) +
+ dimension_range_target_[i].first;
}
-
-template <typename PointFeature> void
-PyramidFeatureHistogram<PointFeature>::compute ()
+template <typename PointFeature>
+void
+PyramidFeatureHistogram<PointFeature>::compute()
{
- if (!initializeHistogram ())
+ if (!initializeHistogram())
return;
- for (const auto& point: *input_)
- {
+ for (const auto& point : *input_) {
std::vector<float> feature_vector;
- convertFeatureToVector (point, feature_vector);
- addFeature (feature_vector);
+ // NaN is converted to a very high number that gives an out-of-bounds exception.
+ if (!pcl::isFinite(point))
+ continue;
+ convertFeatureToVector(point, feature_vector);
+ addFeature(feature_vector);
}
is_computed_ = true;
}
-
-template <typename PointFeature> void
-PyramidFeatureHistogram<PointFeature>::addFeature (std::vector<float> &feature)
+template <typename PointFeature>
+void
+PyramidFeatureHistogram<PointFeature>::addFeature(std::vector<float>& feature)
{
for (std::size_t level_i = 0; level_i < nr_levels; ++level_i)
- at (feature, level_i) ++;
+ at(feature, level_i)++;
}
} // namespace pcl
-#define PCL_INSTANTIATE_PyramidFeatureHistogram(PointFeature) template class PCL_EXPORTS pcl::PyramidFeatureHistogram<PointFeature>;
+#define PCL_INSTANTIATE_PyramidFeatureHistogram(PointFeature) \
+ template class PCL_EXPORTS pcl::PyramidFeatureHistogram<PointFeature>;
#endif /* PCL_REGISTRATION_IMPL_PYRAMID_FEATURE_MATCHING_H_ */
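Editorial note: PyramidFeatureHistogram::at above flattens an N-dimensional bin coordinate into a single vector offset by walking the dimensions from last to first and accumulating a stride. A minimal sketch of that indexing (illustrative names only):

// Flatten a multi-dimensional bin coordinate into a linear histogram index.
#include <cstddef>
#include <vector>

inline std::size_t
flattenBinIndex(const std::vector<std::size_t>& access,
                const std::vector<std::size_t>& bins_per_dimension)
{
  std::size_t position = 0;
  std::size_t stride = 1;
  for (int i = static_cast<int>(access.size()) - 1; i >= 0; --i) {
    position += access[i] * stride;
    stride *= bins_per_dimension[i];
  }
  return position;
}
// Example: access = {1, 2} with bins_per_dimension = {4, 8} gives 1 * 8 + 2 = 10.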
-
#pragma once
-namespace pcl
+namespace pcl {
+
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+Registration<PointSource, PointTarget, Scalar>::setInputSource(
+ const PointCloudSourceConstPtr& cloud)
{
+ if (cloud->points.empty()) {
+ PCL_ERROR("[pcl::%s::setInputSource] Invalid or empty point cloud dataset given!\n",
+ getClassName().c_str());
+ return;
+ }
+ source_cloud_updated_ = true;
+ PCLBase<PointSource>::setInputCloud(cloud);
+}
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-Registration<PointSource, PointTarget, Scalar>::setInputTarget (const PointCloudTargetConstPtr &cloud)
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+Registration<PointSource, PointTarget, Scalar>::setInputTarget(
+ const PointCloudTargetConstPtr& cloud)
{
- if (cloud->points.empty ())
- {
- PCL_ERROR ("[pcl::%s::setInputTarget] Invalid or empty point cloud dataset given!\n", getClassName ().c_str ());
+ if (cloud->points.empty()) {
+ PCL_ERROR("[pcl::%s::setInputTarget] Invalid or empty point cloud dataset given!\n",
+ getClassName().c_str());
return;
}
target_ = cloud;
target_cloud_updated_ = true;
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> bool
-Registration<PointSource, PointTarget, Scalar>::initCompute ()
+template <typename PointSource, typename PointTarget, typename Scalar>
+bool
+Registration<PointSource, PointTarget, Scalar>::initCompute()
{
- if (!target_)
- {
- PCL_ERROR ("[pcl::registration::%s::compute] No input target dataset was given!\n", getClassName ().c_str ());
+ if (!target_) {
+ PCL_ERROR("[pcl::registration::%s::compute] No input target dataset was given!\n",
+ getClassName().c_str());
return (false);
}
// Only update target kd-tree if a new target cloud was set
- if (target_cloud_updated_ && !force_no_recompute_)
- {
- tree_->setInputCloud (target_);
+ if (target_cloud_updated_ && !force_no_recompute_) {
+ tree_->setInputCloud(target_);
target_cloud_updated_ = false;
}
// Update the correspondence estimation
- if (correspondence_estimation_)
- {
- correspondence_estimation_->setSearchMethodTarget (tree_, force_no_recompute_);
- correspondence_estimation_->setSearchMethodSource (tree_reciprocal_, force_no_recompute_reciprocal_);
+ if (correspondence_estimation_) {
+ correspondence_estimation_->setSearchMethodTarget(tree_, force_no_recompute_);
+ correspondence_estimation_->setSearchMethodSource(tree_reciprocal_,
+ force_no_recompute_reciprocal_);
}
- // Note: we /cannot/ update the search method on all correspondence rejectors, because we know
- // nothing about them. If they should be cached, they must be cached individually.
+ // Note: we /cannot/ update the search method on all correspondence rejectors, because
+ // we know nothing about them. If they should be cached, they must be cached
+ // individually.
- return (PCLBase<PointSource>::initCompute ());
+ return (PCLBase<PointSource>::initCompute());
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> bool
-Registration<PointSource, PointTarget, Scalar>::initComputeReciprocal ()
+template <typename PointSource, typename PointTarget, typename Scalar>
+bool
+Registration<PointSource, PointTarget, Scalar>::initComputeReciprocal()
{
- if (!input_)
- {
- PCL_ERROR ("[pcl::registration::%s::compute] No input source dataset was given!\n", getClassName ().c_str ());
+ if (!input_) {
+ PCL_ERROR("[pcl::registration::%s::compute] No input source dataset was given!\n",
+ getClassName().c_str());
return (false);
}
- if (source_cloud_updated_ && !force_no_recompute_reciprocal_)
- {
- tree_reciprocal_->setInputCloud (input_);
+ if (source_cloud_updated_ && !force_no_recompute_reciprocal_) {
+ tree_reciprocal_->setInputCloud(input_);
source_cloud_updated_ = false;
}
return (true);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline double
-Registration<PointSource, PointTarget, Scalar>::getFitnessScore (
- const std::vector<float> &distances_a,
- const std::vector<float> &distances_b)
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline double
+Registration<PointSource, PointTarget, Scalar>::getFitnessScore(
+ const std::vector<float>& distances_a, const std::vector<float>& distances_b)
{
- unsigned int nr_elem = static_cast<unsigned int> (std::min (distances_a.size (), distances_b.size ()));
- Eigen::VectorXf map_a = Eigen::VectorXf::Map (&distances_a[0], nr_elem);
- Eigen::VectorXf map_b = Eigen::VectorXf::Map (&distances_b[0], nr_elem);
- return (static_cast<double> ((map_a - map_b).sum ()) / static_cast<double> (nr_elem));
+ unsigned int nr_elem =
+ static_cast<unsigned int>(std::min(distances_a.size(), distances_b.size()));
+ Eigen::VectorXf map_a = Eigen::VectorXf::Map(&distances_a[0], nr_elem);
+ Eigen::VectorXf map_b = Eigen::VectorXf::Map(&distances_b[0], nr_elem);
+ return (static_cast<double>((map_a - map_b).sum()) / static_cast<double>(nr_elem));
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline double
-Registration<PointSource, PointTarget, Scalar>::getFitnessScore (double max_range)
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline double
+Registration<PointSource, PointTarget, Scalar>::getFitnessScore(double max_range)
{
double fitness_score = 0.0;
// Transform the input dataset using the final transformation
PointCloudSource input_transformed;
- transformPointCloud (*input_, input_transformed, final_transformation_);
+ transformPointCloud(*input_, input_transformed, final_transformation_);
- std::vector<int> nn_indices (1);
- std::vector<float> nn_dists (1);
+ pcl::Indices nn_indices(1);
+ std::vector<float> nn_dists(1);
// For each point in the source dataset
int nr = 0;
- for (const auto& point: input_transformed)
- {
+ for (const auto& point : input_transformed) {
// Find its nearest neighbor in the target
- tree_->nearestKSearch (point, 1, nn_indices, nn_dists);
+ tree_->nearestKSearch(point, 1, nn_indices, nn_dists);
// Deal with occlusions (incomplete targets)
- if (nn_dists[0] <= max_range)
- {
+ if (nn_dists[0] <= max_range) {
// Add to the fitness score
fitness_score += nn_dists[0];
nr++;
if (nr > 0)
return (fitness_score / nr);
- return (std::numeric_limits<double>::max ());
-
+ return (std::numeric_limits<double>::max());
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-Registration<PointSource, PointTarget, Scalar>::align (PointCloudSource &output)
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+Registration<PointSource, PointTarget, Scalar>::align(PointCloudSource& output)
{
- align (output, Matrix4::Identity ());
+ align(output, Matrix4::Identity());
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-Registration<PointSource, PointTarget, Scalar>::align (PointCloudSource &output, const Matrix4& guess)
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+Registration<PointSource, PointTarget, Scalar>::align(PointCloudSource& output,
+ const Matrix4& guess)
{
- if (!initCompute ())
+ if (!initCompute())
return;
// Resize the output dataset
- output.resize (indices_->size ());
+ output.resize(indices_->size());
// Copy the header
- output.header = input_->header;
+ output.header = input_->header;
// Check if the output will be computed for all points or only a subset
- if (indices_->size () != input_->size ())
- {
- output.width = indices_->size ();
- output.height = 1;
+ if (indices_->size() != input_->size()) {
+ output.width = indices_->size();
+ output.height = 1;
}
- else
- {
- output.width = static_cast<std::uint32_t> (input_->width);
- output.height = input_->height;
+ else {
+ output.width = static_cast<std::uint32_t>(input_->width);
+ output.height = input_->height;
}
output.is_dense = input_->is_dense;
// Copy the point data to output
- for (std::size_t i = 0; i < indices_->size (); ++i)
+ for (std::size_t i = 0; i < indices_->size(); ++i)
output[i] = (*input_)[(*indices_)[i]];
// Set the internal point representation of choice unless otherwise noted
- if (point_representation_ && !force_no_recompute_)
- tree_->setPointRepresentation (point_representation_);
+ if (point_representation_ && !force_no_recompute_)
+ tree_->setPointRepresentation(point_representation_);
// Perform the actual transformation computation
converged_ = false;
- final_transformation_ = transformation_ = previous_transformation_ = Matrix4::Identity ();
+ final_transformation_ = transformation_ = previous_transformation_ =
+ Matrix4::Identity();
- // Right before we estimate the transformation, we set all the point.data[3] values to 1 to aid the rigid
- // transformation
- for (std::size_t i = 0; i < indices_->size (); ++i)
+ // Right before we estimate the transformation, we set all the point.data[3] values to
+ // 1 to aid the rigid transformation
+ for (std::size_t i = 0; i < indices_->size(); ++i)
output[i].data[3] = 1.0;
- computeTransformation (output, guess);
+ computeTransformation(output, guess);
- deinitCompute ();
+ deinitCompute();
}
} // namespace pcl
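Editorial note: for context, the Registration interface touched above is typically driven as below; a minimal usage sketch with IterativeClosestPoint standing in for any concrete subclass (illustrative, not part of this changeset):

#include <pcl/console/print.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>

void
alignClouds(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
            const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target)
{
  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> reg;
  reg.setInputSource(source); // validated by Registration::setInputSource above
  reg.setInputTarget(target); // target kd-tree is (re)built lazily in initCompute
  pcl::PointCloud<pcl::PointXYZ> aligned;
  reg.align(aligned);         // runs computeTransformation with an identity guess
  if (reg.hasConverged())
    PCL_INFO("Fitness score (mean squared distance): %f\n", reg.getFitnessScore());
}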
-
#ifndef PCL_REGISTRATION_SAMPLE_CONSENSUS_PREREJECTIVE_HPP_
#define PCL_REGISTRATION_SAMPLE_CONSENSUS_PREREJECTIVE_HPP_
+namespace pcl {
-namespace pcl
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::setSourceFeatures(
+ const FeatureCloudConstPtr& features)
{
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::setSourceFeatures (const FeatureCloudConstPtr &features)
-{
- if (features == nullptr || features->empty ())
- {
- PCL_ERROR ("[pcl::%s::setSourceFeatures] Invalid or empty point cloud dataset given!\n", getClassName ().c_str ());
+ if (features == nullptr || features->empty()) {
+ PCL_ERROR(
+ "[pcl::%s::setSourceFeatures] Invalid or empty point cloud dataset given!\n",
+ getClassName().c_str());
return;
}
input_features_ = features;
}
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::setTargetFeatures (const FeatureCloudConstPtr &features)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::setTargetFeatures(
+ const FeatureCloudConstPtr& features)
{
- if (features == nullptr || features->empty ())
- {
- PCL_ERROR ("[pcl::%s::setTargetFeatures] Invalid or empty point cloud dataset given!\n", getClassName ().c_str ());
+ if (features == nullptr || features->empty()) {
+ PCL_ERROR(
+ "[pcl::%s::setTargetFeatures] Invalid or empty point cloud dataset given!\n",
+ getClassName().c_str());
return;
}
target_features_ = features;
- feature_tree_->setInputCloud (target_features_);
+ feature_tree_->setInputCloud(target_features_);
}
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::selectSamples (
- const PointCloudSource &cloud, int nr_samples, std::vector<int> &sample_indices)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::selectSamples(
+ const PointCloudSource& cloud, int nr_samples, pcl::Indices& sample_indices)
{
- if (nr_samples > static_cast<int> (cloud.size ()))
- {
- PCL_ERROR ("[pcl::%s::selectSamples] ", getClassName ().c_str ());
+ if (nr_samples > static_cast<int>(cloud.size())) {
+ PCL_ERROR("[pcl::%s::selectSamples] ", getClassName().c_str());
PCL_ERROR("The number of samples (%d) must not be greater than the number of "
"points (%zu)!\n",
nr_samples,
return;
}
- sample_indices.resize (nr_samples);
+ sample_indices.resize(nr_samples);
int temp_sample;
// Draw random samples until n samples is reached
- for (int i = 0; i < nr_samples; i++)
- {
+ for (int i = 0; i < nr_samples; i++) {
// Select a random number
- sample_indices[i] = getRandomIndex (static_cast<int> (cloud.size ()) - i);
+ sample_indices[i] = getRandomIndex(static_cast<int>(cloud.size()) - i);
// Run through list of numbers, starting at the lowest, to avoid duplicates
- for (int j = 0; j < i; j++)
- {
- // Move value up if it is higher than previous selections to ensure true randomness
- if (sample_indices[i] >= sample_indices[j])
- {
+ for (int j = 0; j < i; j++) {
+ // Move value up if it is higher than previous selections to ensure true
+ // randomness
+ if (sample_indices[i] >= sample_indices[j]) {
sample_indices[i]++;
}
- else
- {
- // The new number is lower, place it at the correct point and break for a sorted list
+ else {
+ // The new number is lower, place it at the correct point and break for a sorted
+ // list
temp_sample = sample_indices[i];
for (int k = i; k > j; k--)
sample_indices[k] = sample_indices[k - 1];
}
}
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::findSimilarFeatures (
- const std::vector<int> &sample_indices,
- std::vector<std::vector<int> >& similar_features,
- std::vector<int> &corresponding_indices)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::findSimilarFeatures(
+ const pcl::Indices& sample_indices,
+ std::vector<pcl::Indices>& similar_features,
+ pcl::Indices& corresponding_indices)
{
// Allocate results
- corresponding_indices.resize (sample_indices.size ());
- std::vector<float> nn_distances (k_correspondences_);
+ corresponding_indices.resize(sample_indices.size());
+ std::vector<float> nn_distances(k_correspondences_);
// Loop over the sampled features
- for (std::size_t i = 0; i < sample_indices.size (); ++i)
- {
+ for (std::size_t i = 0; i < sample_indices.size(); ++i) {
// Current feature index
- const int idx = sample_indices[i];
+ const auto& idx = sample_indices[i];
- // Find the k nearest feature neighbors to the sampled input feature if they are not in the cache already
- if (similar_features[idx].empty ())
- feature_tree_->nearestKSearch (*input_features_, idx, k_correspondences_, similar_features[idx], nn_distances);
+ // Find the k nearest feature neighbors to the sampled input feature if they are not
+ // in the cache already
+ if (similar_features[idx].empty())
+ feature_tree_->nearestKSearch(*input_features_,
+ idx,
+ k_correspondences_,
+ similar_features[idx],
+ nn_distances);
// Select one at random and add it to corresponding_indices
if (k_correspondences_ == 1)
corresponding_indices[i] = similar_features[idx][0];
else
- corresponding_indices[i] = similar_features[idx][getRandomIndex (k_correspondences_)];
+ corresponding_indices[i] =
+ similar_features[idx][getRandomIndex(k_correspondences_)];
}
}
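// Illustrative note on the cache above: similar_features memoizes the k-NN feature
// search per source index, and setCorrespondenceRandomness(k) controls
// k_correspondences_. A minimal sketch (the object `align` is hypothetical):
//   align.setCorrespondenceRandomness(5); // pick one of the 5 nearest target features
//   align.setCorrespondenceRandomness(1); // always use the single nearest feature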
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::computeTransformation(
+ PointCloudSource& output, const Eigen::Matrix4f& guess)
{
// Some sanity checks first
- if (!input_features_)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("No source features were given! Call setSourceFeatures before aligning.\n");
+ if (!input_features_) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR(
+ "No source features were given! Call setSourceFeatures before aligning.\n");
return;
}
- if (!target_features_)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("No target features were given! Call setTargetFeatures before aligning.\n");
+ if (!target_features_) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR(
+ "No target features were given! Call setTargetFeatures before aligning.\n");
return;
}
- if (input_->size () != input_features_->size ())
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("The source points and source feature points need to be in a one-to-one relationship! Current input cloud sizes: %ld vs %ld.\n",
- input_->size (), input_features_->size ());
+ if (input_->size() != input_features_->size()) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR("The source points and source feature points need to be in a one-to-one "
+ "relationship! Current input cloud sizes: %ld vs %ld.\n",
+ input_->size(),
+ input_features_->size());
return;
}
- if (target_->size () != target_features_->size ())
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("The target points and target feature points need to be in a one-to-one relationship! Current input cloud sizes: %ld vs %ld.\n",
- target_->size (), target_features_->size ());
+ if (target_->size() != target_features_->size()) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR("The target points and target feature points need to be in a one-to-one "
+ "relationship! Current input cloud sizes: %ld vs %ld.\n",
+ target_->size(),
+ target_features_->size());
return;
}
- if (inlier_fraction_ < 0.0f || inlier_fraction_ > 1.0f)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("Illegal inlier fraction %f, must be in [0,1]!\n",
- inlier_fraction_);
+ if (inlier_fraction_ < 0.0f || inlier_fraction_ > 1.0f) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR("Illegal inlier fraction %f, must be in [0,1]!\n", inlier_fraction_);
return;
}
- const float similarity_threshold = correspondence_rejector_poly_->getSimilarityThreshold ();
- if (similarity_threshold < 0.0f || similarity_threshold >= 1.0f)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("Illegal prerejection similarity threshold %f, must be in [0,1[!\n",
- similarity_threshold);
+ const float similarity_threshold =
+ correspondence_rejector_poly_->getSimilarityThreshold();
+ if (similarity_threshold < 0.0f || similarity_threshold >= 1.0f) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR("Illegal prerejection similarity threshold %f, must be in [0,1[!\n",
+ similarity_threshold);
return;
}
- if (k_correspondences_ <= 0)
- {
- PCL_ERROR ("[pcl::%s::computeTransformation] ", getClassName ().c_str ());
- PCL_ERROR ("Illegal correspondence randomness %d, must be > 0!\n",
- k_correspondences_);
+ if (k_correspondences_ <= 0) {
+ PCL_ERROR("[pcl::%s::computeTransformation] ", getClassName().c_str());
+ PCL_ERROR("Illegal correspondence randomness %d, must be > 0!\n",
+ k_correspondences_);
return;
}
- // Initialize prerejector (similarity threshold already set to default value in constructor)
- correspondence_rejector_poly_->setInputSource (input_);
- correspondence_rejector_poly_->setInputTarget (target_);
- correspondence_rejector_poly_->setCardinality (nr_samples_);
+ // Initialize prerejector (similarity threshold already set to default value in
+ // constructor)
+ correspondence_rejector_poly_->setInputSource(input_);
+ correspondence_rejector_poly_->setInputTarget(target_);
+ correspondence_rejector_poly_->setCardinality(nr_samples_);
int num_rejections = 0; // For debugging
// Initialize results
final_transformation_ = guess;
- inliers_.clear ();
- float lowest_error = std::numeric_limits<float>::max ();
+ inliers_.clear();
+ float lowest_error = std::numeric_limits<float>::max();
converged_ = false;
// Temporaries
- std::vector<int> inliers;
+ pcl::Indices inliers;
float inlier_fraction;
float error;
// If guess is not the Identity matrix we check it
- if (!guess.isApprox (Eigen::Matrix4f::Identity (), 0.01f))
- {
- getFitness (inliers, error);
- inlier_fraction = static_cast<float> (inliers.size ()) / static_cast<float> (input_->size ());
+ if (!guess.isApprox(Eigen::Matrix4f::Identity(), 0.01f)) {
+ getFitness(inliers, error);
+ inlier_fraction =
+ static_cast<float>(inliers.size()) / static_cast<float>(input_->size());
- if (inlier_fraction >= inlier_fraction_ && error < lowest_error)
- {
+ if (inlier_fraction >= inlier_fraction_ && error < lowest_error) {
inliers_ = inliers;
lowest_error = error;
converged_ = true;
}
// Feature correspondence cache
- std::vector<std::vector<int> > similar_features (input_->size ());
+ std::vector<pcl::Indices> similar_features(input_->size());
// Start
- for (int i = 0; i < max_iterations_; ++i)
- {
+ for (int i = 0; i < max_iterations_; ++i) {
// Temporary containers
- std::vector<int> sample_indices;
- std::vector<int> corresponding_indices;
+ pcl::Indices sample_indices;
+ pcl::Indices corresponding_indices;
// Draw nr_samples_ random samples
- selectSamples (*input_, nr_samples_, sample_indices);
+ selectSamples(*input_, nr_samples_, sample_indices);
// Find corresponding features in the target cloud
- findSimilarFeatures (sample_indices, similar_features, corresponding_indices);
+ findSimilarFeatures(sample_indices, similar_features, corresponding_indices);
// Apply prerejection
- if (!correspondence_rejector_poly_->thresholdPolygon (sample_indices, corresponding_indices))
- {
+ if (!correspondence_rejector_poly_->thresholdPolygon(sample_indices,
+ corresponding_indices)) {
++num_rejections;
continue;
}
// Estimate the transform from the correspondences, write to transformation_
- transformation_estimation_->estimateRigidTransformation (*input_, sample_indices, *target_, corresponding_indices, transformation_);
+ transformation_estimation_->estimateRigidTransformation(
+ *input_, sample_indices, *target_, corresponding_indices, transformation_);
// Take a backup of previous result
const Matrix4 final_transformation_prev = final_transformation_;
final_transformation_ = transformation_;
// Transform the input and compute the error (uses input_ and final_transformation_)
- getFitness (inliers, error);
+ getFitness(inliers, error);
// Restore previous result
final_transformation_ = final_transformation_prev;
// If the new fit is better, update results
- inlier_fraction = static_cast<float> (inliers.size ()) / static_cast<float> (input_->size ());
+ inlier_fraction =
+ static_cast<float>(inliers.size()) / static_cast<float>(input_->size());
// Update result if pose hypothesis is better
- if (inlier_fraction >= inlier_fraction_ && error < lowest_error)
- {
+ if (inlier_fraction >= inlier_fraction_ && error < lowest_error) {
inliers_ = inliers;
lowest_error = error;
converged_ = true;
// Apply the final transformation
if (converged_)
- transformPointCloud (*input_, output, final_transformation_);
+ transformPointCloud(*input_, output, final_transformation_);
// Debug output
- PCL_DEBUG("[pcl::%s::computeTransformation] Rejected %i out of %i generated pose hypotheses.\n",
- getClassName ().c_str (), num_rejections, max_iterations_);
+ PCL_DEBUG("[pcl::%s::computeTransformation] Rejected %i out of %i generated pose "
+ "hypotheses.\n",
+ getClassName().c_str(),
+ num_rejections,
+ max_iterations_);
}
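// End-to-end usage sketch for the alignment loop above (assumes PointXYZ clouds with
// precomputed FPFH features; variable names and parameter values are only examples):
//   pcl::SampleConsensusPrerejective<pcl::PointXYZ, pcl::PointXYZ, pcl::FPFHSignature33> align;
//   align.setInputSource(object);
//   align.setSourceFeatures(object_features);
//   align.setInputTarget(scene);
//   align.setTargetFeatures(scene_features);
//   align.setMaximumIterations(50000);    // max_iterations_ (pose hypotheses)
//   align.setNumberOfSamples(3);          // nr_samples_
//   align.setCorrespondenceRandomness(5); // k_correspondences_
//   align.setSimilarityThreshold(0.9f);   // prerejector edge-length similarity
//   align.setMaxCorrespondenceDistance(0.025f);
//   align.setInlierFraction(0.25f);       // inlier_fraction_
//   pcl::PointCloud<pcl::PointXYZ> object_aligned;
//   align.align(object_aligned);
//   if (align.hasConverged())
//     std::cout << align.getFinalTransformation() << std::endl;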
-
-template <typename PointSource, typename PointTarget, typename FeatureT> void
-SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::getFitness (std::vector<int>& inliers, float& fitness_score)
+template <typename PointSource, typename PointTarget, typename FeatureT>
+void
+SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::getFitness(
+ pcl::Indices& inliers, float& fitness_score)
{
// Initialize variables
- inliers.clear ();
- inliers.reserve (input_->size ());
+ inliers.clear();
+ inliers.reserve(input_->size());
fitness_score = 0.0f;
// Use squared distance for comparison with NN search results
// Transform the input dataset using the final transformation
PointCloudSource input_transformed;
- input_transformed.resize (input_->size ());
- transformPointCloud (*input_, input_transformed, final_transformation_);
+ input_transformed.resize(input_->size());
+ transformPointCloud(*input_, input_transformed, final_transformation_);
// For each point in the source dataset
- for (std::size_t i = 0; i < input_transformed.size (); ++i)
- {
+ for (std::size_t i = 0; i < input_transformed.size(); ++i) {
// Find its nearest neighbor in the target
- std::vector<int> nn_indices (1);
- std::vector<float> nn_dists (1);
- tree_->nearestKSearch (input_transformed[i], 1, nn_indices, nn_dists);
+ pcl::Indices nn_indices(1);
+ std::vector<float> nn_dists(1);
+ tree_->nearestKSearch(input_transformed[i], 1, nn_indices, nn_dists);
// Check if point is an inlier
- if (nn_dists[0] < max_range)
- {
+ if (nn_dists[0] < max_range) {
// Update inliers
- inliers.push_back (static_cast<int> (i));
+ inliers.push_back(i);
// Update fitness score
fitness_score += nn_dists[0];
}
// Calculate MSE
- if (!inliers.empty ())
- fitness_score /= static_cast<float> (inliers.size ());
+ if (!inliers.empty())
+ fitness_score /= static_cast<float>(inliers.size());
else
- fitness_score = std::numeric_limits<float>::max ();
+ fitness_score = std::numeric_limits<float>::max();
}
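// Reading the result from user code (sketch): the inlier set computed here is exposed
// through getInliers() after align(), and an overall score through getFitnessScore():
//   const pcl::Indices& inliers = align.getInliers();
//   double score = align.getFitnessScore(); // lower is better
// If no inliers are found, the fitness above degenerates to
// std::numeric_limits<float>::max().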
} // namespace pcl
#endif
-
#ifndef PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_2D_HPP_
#define PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_2D_HPP_
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimation2D<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimation2D<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = cloud_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = cloud_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR("[pcl::TransformationEstimation2D::estimateRigidTransformation] Number "
"or points in source (%zu) differs than target (%zu)!\n",
static_cast<std::size_t>(nr_points),
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimation2D<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimation2D<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.size ())
- {
+ if (indices_src.size() != cloud_tgt.size()) {
PCL_ERROR("[pcl::Transformation2D::estimateRigidTransformation] Number or points "
"in source (%zu) differs than target (%zu)!\n",
indices_src.size(),
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimation2D<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimation2D<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != indices_tgt.size ())
- {
- PCL_ERROR ("[pcl::TransformationEstimation2D::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ if (indices_src.size() != indices_tgt.size()) {
+ PCL_ERROR("[pcl::TransformationEstimation2D::estimateRigidTransformation] Number "
+             "of points in source (%lu) differs from target (%lu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimation2D<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimation2D<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimation2D<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimation2D<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const
{
- source_it.reset (); target_it.reset ();
+ source_it.reset();
+ target_it.reset();
Eigen::Matrix<Scalar, 4, 1> centroid_src, centroid_tgt;
// Estimate the centroids of source, target
- compute3DCentroid (source_it, centroid_src);
- compute3DCentroid (target_it, centroid_tgt);
- source_it.reset (); target_it.reset ();
+ compute3DCentroid(source_it, centroid_src);
+ compute3DCentroid(target_it, centroid_tgt);
+ source_it.reset();
+ target_it.reset();
// ignore z component
centroid_src[2] = 0.0f;
centroid_tgt[2] = 0.0f;
// Subtract the centroids from source, target
- Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> cloud_src_demean, cloud_tgt_demean;
- demeanPointCloud (source_it, centroid_src, cloud_src_demean);
- demeanPointCloud (target_it, centroid_tgt, cloud_tgt_demean);
-
- getTransformationFromCorrelation (cloud_src_demean, centroid_src, cloud_tgt_demean, centroid_tgt, transformation_matrix);
+ Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> cloud_src_demean,
+ cloud_tgt_demean;
+ demeanPointCloud(source_it, centroid_src, cloud_src_demean);
+ demeanPointCloud(target_it, centroid_tgt, cloud_tgt_demean);
+
+ getTransformationFromCorrelation(cloud_src_demean,
+ centroid_src,
+ cloud_tgt_demean,
+ centroid_tgt,
+ transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimation2D<PointSource, PointTarget, Scalar>::getTransformationFromCorrelation (
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_src_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_src,
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_tgt_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimation2D<PointSource, PointTarget, Scalar>::
+ getTransformationFromCorrelation(
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_src_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_src,
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_tgt_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_tgt,
+ Matrix4& transformation_matrix) const
{
- transformation_matrix.setIdentity ();
+ transformation_matrix.setIdentity();
// Assemble the correlation matrix H = source * target'
- Eigen::Matrix<Scalar, 3, 3> H = (cloud_src_demean * cloud_tgt_demean.transpose ()).topLeftCorner (3, 3);
+ Eigen::Matrix<Scalar, 3, 3> H =
+ (cloud_src_demean * cloud_tgt_demean.transpose()).topLeftCorner(3, 3);
- float angle = std::atan2 ((H (0, 1) - H (1, 0)), (H(0, 0) + H (1, 1)));
+ float angle = std::atan2((H(0, 1) - H(1, 0)), (H(0, 0) + H(1, 1)));
- Eigen::Matrix<Scalar, 3, 3> R (Eigen::Matrix<Scalar, 3, 3>::Identity ());
- R (0, 0) = R (1, 1) = std::cos (angle);
- R (0, 1) = -std::sin (angle);
- R (1, 0) = std::sin (angle);
+ Eigen::Matrix<Scalar, 3, 3> R(Eigen::Matrix<Scalar, 3, 3>::Identity());
+ R(0, 0) = R(1, 1) = std::cos(angle);
+ R(0, 1) = -std::sin(angle);
+ R(1, 0) = std::sin(angle);
// Return the correct transformation
- transformation_matrix.topLeftCorner (3, 3).matrix () = R;
- const Eigen::Matrix<Scalar, 3, 1> Rc (R * centroid_src.head (3).matrix ());
- transformation_matrix.block (0, 3, 3, 1).matrix () = centroid_tgt.head (3) - Rc;
+ transformation_matrix.topLeftCorner(3, 3).matrix() = R;
+ const Eigen::Matrix<Scalar, 3, 1> Rc(R * centroid_src.head(3).matrix());
+ transformation_matrix.block(0, 3, 3, 1).matrix() = centroid_tgt.head(3) - Rc;
}
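// Math behind the 2-D estimator above: with H = cloud_src_demean * cloud_tgt_demean^T,
// the optimal in-plane rotation is theta = atan2(H(0,1) - H(1,0), H(0,0) + H(1,1)) and
// the translation is t = centroid_tgt - R(theta) * centroid_src (the z component is
// ignored). A sketch of plugging it into ICP to constrain motion to the x-y plane
// (assumes pcl::make_shared and PointXYZ clouds):
//   pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
//   icp.setTransformationEstimation(
//       pcl::make_shared<pcl::registration::TransformationEstimation2D<pcl::PointXYZ,
//                                                                      pcl::PointXYZ>>());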
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_2D_HPP_
-
+#endif // PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_2D_HPP_
#include <pcl/registration/transformation_estimation_3point.h>
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- if (cloud_src.size () != 3 || cloud_tgt.size () != 3)
- {
+ if (cloud_src.size() != 3 || cloud_tgt.size() != 3) {
PCL_ERROR("[pcl::TransformationEstimation3Point::estimateRigidTransformation] "
"Number of points in source (%zu) and target (%zu) must be 3!\n",
static_cast<std::size_t>(cloud_src.size()),
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> void
-pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != 3 || cloud_tgt.size () != 3)
- {
+ if (indices_src.size() != 3 || cloud_tgt.size() != 3) {
PCL_ERROR(
"[pcl::TransformationEstimation3Point::estimateRigidTransformation] Number of "
"indices in source (%zu) and points in target (%zu) must be 3!\n",
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != 3 || indices_tgt.size () != 3)
- {
- PCL_ERROR ("[pcl::TransformationEstimation3Point::estimateRigidTransformation] Number of indices in source (%lu) and target (%lu) must be 3!\n",
- indices_src.size (), indices_tgt.size ());
+ if (indices_src.size() != 3 || indices_tgt.size() != 3) {
+ PCL_ERROR("[pcl::TransformationEstimation3Point::estimateRigidTransformation] "
+ "Number of indices in source (%lu) and target (%lu) must be 3!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> void
-pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- if (correspondences.size () != 3)
- {
- PCL_ERROR ("[pcl::TransformationEstimation3Point::estimateRigidTransformation] Number of correspondences (%lu) must be 3!\n",
- correspondences.size ());
+ if (correspondences.size() != 3) {
+ PCL_ERROR("[pcl::TransformationEstimation3Point::estimateRigidTransformation] "
+ "Number of correspondences (%lu) must be 3!\n",
+ correspondences.size());
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+pcl::registration::TransformationEstimation3Point<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const
{
- transformation_matrix.setIdentity ();
- source_it.reset ();
- target_it.reset ();
-
- Eigen::Matrix <Scalar, 4, 1> source_mean, target_mean;
- pcl::compute3DCentroid (source_it, source_mean);
- pcl::compute3DCentroid (target_it, target_mean);
-
- source_it.reset ();
- target_it.reset ();
-
- Eigen::Matrix <Scalar, Eigen::Dynamic, Eigen::Dynamic> source_demean, target_demean;
- pcl::demeanPointCloud (source_it, source_mean, source_demean, 3);
- pcl::demeanPointCloud (target_it, target_mean, target_demean, 3);
-
- source_it.reset ();
- target_it.reset ();
-
- Eigen::Matrix <Scalar, 3, 1> s1 = source_demean.col (1).head (3) - source_demean.col (0).head (3);
- s1.normalize ();
-
- Eigen::Matrix <Scalar, 3, 1> s2 = source_demean.col (2).head (3) - source_demean.col (0).head (3);
- s2 -= s2.dot (s1) * s1;
- s2.normalize ();
-
- Eigen::Matrix <Scalar, 3, 3> source_rot;
- source_rot.col (0) = s1;
- source_rot.col (1) = s2;
- source_rot.col (2) = s1.cross (s2);
-
- Eigen::Matrix <Scalar, 3, 1> t1 = target_demean.col (1).head (3) - target_demean.col (0).head (3);
- t1.normalize ();
-
- Eigen::Matrix <Scalar, 3, 1> t2 = target_demean.col (2).head (3) - target_demean.col (0).head (3);
- t2 -= t2.dot (t1) * t1;
- t2.normalize ();
-
- Eigen::Matrix <Scalar, 3, 3> target_rot;
- target_rot.col (0) = t1;
- target_rot.col (1) = t2;
- target_rot.col (2) = t1.cross (t2);
-
- //Eigen::Matrix <Scalar, 3, 3> R = source_rot * target_rot.transpose ();
- Eigen::Matrix <Scalar, 3, 3> R = target_rot * source_rot.transpose ();
- transformation_matrix.topLeftCorner (3, 3) = R;
- //transformation_matrix.block (0, 3, 3, 1) = source_mean.head (3) - R * target_mean.head (3);
- transformation_matrix.block (0, 3, 3, 1) = target_mean.head (3) - R * source_mean.head (3);
+ transformation_matrix.setIdentity();
+ source_it.reset();
+ target_it.reset();
+
+ Eigen::Matrix<Scalar, 4, 1> source_mean, target_mean;
+ pcl::compute3DCentroid(source_it, source_mean);
+ pcl::compute3DCentroid(target_it, target_mean);
+
+ source_it.reset();
+ target_it.reset();
+
+ Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> source_demean, target_demean;
+ pcl::demeanPointCloud(source_it, source_mean, source_demean, 3);
+ pcl::demeanPointCloud(target_it, target_mean, target_demean, 3);
+
+ source_it.reset();
+ target_it.reset();
+
+ Eigen::Matrix<Scalar, 3, 1> s1 =
+ source_demean.col(1).head(3) - source_demean.col(0).head(3);
+ s1.normalize();
+
+ Eigen::Matrix<Scalar, 3, 1> s2 =
+ source_demean.col(2).head(3) - source_demean.col(0).head(3);
+ s2 -= s2.dot(s1) * s1;
+ s2.normalize();
+
+ Eigen::Matrix<Scalar, 3, 3> source_rot;
+ source_rot.col(0) = s1;
+ source_rot.col(1) = s2;
+ source_rot.col(2) = s1.cross(s2);
+
+ Eigen::Matrix<Scalar, 3, 1> t1 =
+ target_demean.col(1).head(3) - target_demean.col(0).head(3);
+ t1.normalize();
+
+ Eigen::Matrix<Scalar, 3, 1> t2 =
+ target_demean.col(2).head(3) - target_demean.col(0).head(3);
+ t2 -= t2.dot(t1) * t1;
+ t2.normalize();
+
+ Eigen::Matrix<Scalar, 3, 3> target_rot;
+ target_rot.col(0) = t1;
+ target_rot.col(1) = t2;
+ target_rot.col(2) = t1.cross(t2);
+
+ // Eigen::Matrix <Scalar, 3, 3> R = source_rot * target_rot.transpose ();
+ Eigen::Matrix<Scalar, 3, 3> R = target_rot * source_rot.transpose();
+ transformation_matrix.topLeftCorner(3, 3) = R;
+ // transformation_matrix.block (0, 3, 3, 1) = source_mean.head (3) - R *
+ // target_mean.head (3);
+ transformation_matrix.block(0, 3, 3, 1) =
+ target_mean.head(3) - R * source_mean.head(3);
}
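// Geometric summary of the 3-point estimator above: an orthonormal frame is built for
// each point triple by Gram-Schmidt on its two edge vectors plus their cross product,
// the rotation is R = target_rot * source_rot^T, and the translation aligns the
// centroids, t = target_mean - R * source_mean. Exactly three non-collinear
// correspondences are required, otherwise the frames are ill-defined.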
-//#define PCL_INSTANTIATE_TransformationEstimation3Point(T,U) template class PCL_EXPORTS pcl::registration::TransformationEstimation3Point<T,U>;
+//#define PCL_INSTANTIATE_TransformationEstimation3Point(T,U) template class PCL_EXPORTS
+// pcl::registration::TransformationEstimation3Point<T,U>;
#endif // PCL_REGISTRATION_IMPL_TRANSFORMATION_ESTIMATION_3POINT_H_
#define PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_DQ_HPP_
#include <pcl/common/eigen.h>
+PCL_DEPRECATED_HEADER(1,
+ 15,
+ "TransformationEstimationDQ has been renamed to "
+ "TransformationEstimationDualQuaternion.");
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = cloud_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = cloud_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR("[pcl::TransformationEstimationDQ::estimateRigidTransformation] Number "
"or points in source (%zu) differs than target (%zu)!\n",
static_cast<std::size_t>(nr_points),
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.size ())
- {
+ if (indices_src.size() != cloud_tgt.size()) {
PCL_ERROR("[pcl::TransformationDQ::estimateRigidTransformation] Number or points "
"in source (%zu) differs than target (%zu)!\n",
indices_src.size(),
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != indices_tgt.size ())
- {
+ if (indices_src.size() != indices_tgt.size()) {
PCL_ERROR("[pcl::TransformationEstimationDQ::estimateRigidTransformation] Number "
"or points in source (%zu) differs than target (%zu)!\n",
indices_src.size(),
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const
{
- const int npts = static_cast <int> (source_it.size ());
+ const int npts = static_cast<int>(source_it.size());
- transformation_matrix.setIdentity ();
+ transformation_matrix.setIdentity();
// dual quaternion optimization
- Eigen::Matrix<Scalar,4,4> C1 = Eigen::Matrix<Scalar,4,4>::Zero();
- Eigen::Matrix<Scalar,4,4> C2 = Eigen::Matrix<Scalar,4,4>::Zero();
- Scalar *c1 = C1.data();
- Scalar *c2 = C2.data();
-
- for( int i=0; i<npts; i++ ) {
- const PointSource &a = *source_it;
- const PointTarget &b = *target_it;
- const Scalar axbx = a.x*b.x;
- const Scalar ayby = a.y*b.y;
- const Scalar azbz = a.z*b.z;
- const Scalar axby = a.x*b.y;
- const Scalar aybx = a.y*b.x;
- const Scalar axbz = a.x*b.z;
- const Scalar azbx = a.z*b.x;
- const Scalar aybz = a.y*b.z;
- const Scalar azby = a.z*b.y;
+ Eigen::Matrix<Scalar, 4, 4> C1 = Eigen::Matrix<Scalar, 4, 4>::Zero();
+ Eigen::Matrix<Scalar, 4, 4> C2 = Eigen::Matrix<Scalar, 4, 4>::Zero();
+ Scalar* c1 = C1.data();
+ Scalar* c2 = C2.data();
+
+ for (int i = 0; i < npts; i++) {
+ const PointSource& a = *source_it;
+ const PointTarget& b = *target_it;
+ const Scalar axbx = a.x * b.x;
+ const Scalar ayby = a.y * b.y;
+ const Scalar azbz = a.z * b.z;
+ const Scalar axby = a.x * b.y;
+ const Scalar aybx = a.y * b.x;
+ const Scalar axbz = a.x * b.z;
+ const Scalar azbx = a.z * b.x;
+ const Scalar aybz = a.y * b.z;
+ const Scalar azby = a.z * b.y;
c1[0] += axbx - azbz - ayby;
c1[5] += ayby - azbz - axbx;
- c1[10]+= azbz - axbx - ayby;
- c1[15]+= axbx + ayby + azbz;
+ c1[10] += azbz - axbx - ayby;
+ c1[15] += axbx + ayby + azbz;
c1[1] += axby + aybx;
c1[2] += axbz + azbx;
c1[3] += aybz - azby;
c1[6] += azby + aybz;
c1[7] += azbx - axbz;
- c1[11]+= axby - aybx;
+ c1[11] += axby - aybx;
c2[1] += a.z + b.z;
c2[2] -= a.y + b.y;
c2[3] += a.x - b.x;
c2[6] += a.x + b.x;
c2[7] += a.y - b.y;
- c2[11]+= a.z - b.z;
+ c2[11] += a.z - b.z;
source_it++;
target_it++;
}
c1[4] = c1[1];
c1[8] = c1[2];
c1[9] = c1[6];
- c1[12]= c1[3];
- c1[13]= c1[7];
- c1[14]= c1[11];
+ c1[12] = c1[3];
+ c1[13] = c1[7];
+ c1[14] = c1[11];
c2[4] = -c2[1];
c2[8] = -c2[2];
- c2[12]= -c2[3];
+ c2[12] = -c2[3];
c2[9] = -c2[6];
- c2[13]= -c2[7];
- c2[14]= -c2[11];
+ c2[13] = -c2[7];
+ c2[14] = -c2[11];
C1 *= -2.0f;
C2 *= 2.0f;
- const Eigen::Matrix<Scalar,4,4> A = (0.25f/float(npts))*C2.transpose()*C2 - C1;
+ const Eigen::Matrix<Scalar, 4, 4> A =
+ (0.25f / float(npts)) * C2.transpose() * C2 - C1;
- const Eigen::EigenSolver< Eigen::Matrix<Scalar,4,4> > es(A);
+ const Eigen::EigenSolver<Eigen::Matrix<Scalar, 4, 4>> es(A);
ptrdiff_t i;
es.eigenvalues().real().maxCoeff(&i);
- const Eigen::Matrix<Scalar,4,1> qmat = es.eigenvectors().col(i).real();
- const Eigen::Matrix<Scalar,4,1> smat = -(0.5f/float(npts))*C2*qmat;
+ const Eigen::Matrix<Scalar, 4, 1> qmat = es.eigenvectors().col(i).real();
+ const Eigen::Matrix<Scalar, 4, 1> smat = -(0.5f / float(npts)) * C2 * qmat;
- const Eigen::Quaternion<Scalar> q( qmat(3), qmat(0), qmat(1), qmat(2) );
- const Eigen::Quaternion<Scalar> s( smat(3), smat(0), smat(1), smat(2) );
+ const Eigen::Quaternion<Scalar> q(qmat(3), qmat(0), qmat(1), qmat(2));
+ const Eigen::Quaternion<Scalar> s(smat(3), smat(0), smat(1), smat(2));
- const Eigen::Quaternion<Scalar> t = s*q.conjugate();
+ const Eigen::Quaternion<Scalar> t = s * q.conjugate();
- const Eigen::Matrix<Scalar,3,3> R( q.toRotationMatrix() );
+ const Eigen::Matrix<Scalar, 3, 3> R(q.toRotationMatrix());
- for( int i=0; i<3; ++i )
- for( int j=0; j<3; ++j)
- transformation_matrix(i,j) = R(i,j);
+ for (int i = 0; i < 3; ++i)
+ for (int j = 0; j < 3; ++j)
+ transformation_matrix(i, j) = R(i, j);
- transformation_matrix(0,3) = -t.x();
- transformation_matrix(1,3) = -t.y();
- transformation_matrix(2,3) = -t.z();
+ transformation_matrix(0, 3) = -t.x();
+ transformation_matrix(1, 3) = -t.y();
+ transformation_matrix(2, 3) = -t.z();
}
} // namespace registration
} // namespace pcl
#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_DQ_HPP_ */
-
#include <pcl/common/eigen.h>
+#include <Eigen/Eigenvalues> // for EigenSolver
-namespace pcl
-{
+namespace pcl {
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = cloud_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = cloud_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR(
"[pcl::TransformationEstimationDualQuaternion::estimateRigidTransformation] "
"Number or points in source (%zu) differs than target (%zu)!\n",
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.size ())
- {
+ if (indices_src.size() != cloud_tgt.size()) {
PCL_ERROR("[pcl::TransformationDQ::estimateRigidTransformation] Number or points "
"in source (%zu) differs than target (%zu)!\n",
indices_src.size(),
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != indices_tgt.size ())
- {
- PCL_ERROR ("[pcl::TransformationEstimationDualQuaternion::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ if (indices_src.size() != indices_tgt.size()) {
+ PCL_ERROR(
+ "[pcl::TransformationEstimationDualQuaternion::estimateRigidTransformation] "
+     "Number of points in source (%lu) differs from target (%lu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const
{
- const int npts = static_cast<int> (source_it.size ());
+ const int npts = static_cast<int>(source_it.size());
- transformation_matrix.setIdentity ();
+ transformation_matrix.setIdentity();
// dual quaternion optimization
- Eigen::Matrix<double, 4, 4> C1 = Eigen::Matrix<double, 4, 4>::Zero ();
- Eigen::Matrix<double, 4, 4> C2 = Eigen::Matrix<double, 4, 4>::Zero ();
- double* c1 = C1.data ();
- double* c2 = C2.data ();
+ Eigen::Matrix<double, 4, 4> C1 = Eigen::Matrix<double, 4, 4>::Zero();
+ Eigen::Matrix<double, 4, 4> C2 = Eigen::Matrix<double, 4, 4>::Zero();
+ double* c1 = C1.data();
+ double* c2 = C2.data();
- for (int i = 0; i < npts; ++i)
- {
+ for (int i = 0; i < npts; ++i) {
const PointSource& a = *source_it;
const PointTarget& b = *target_it;
const double axbx = a.x * b.x;
const double azbx = a.z * b.x;
const double aybz = a.y * b.z;
const double azby = a.z * b.y;
- c1[0] += axbx - azbz - ayby;
- c1[5] += ayby - azbz - axbx;
+ c1[0] += axbx - azbz - ayby;
+ c1[5] += ayby - azbz - axbx;
c1[10] += azbz - axbx - ayby;
c1[15] += axbx + ayby + azbz;
- c1[1] += axby + aybx;
- c1[2] += axbz + azbx;
- c1[3] += aybz - azby;
- c1[6] += azby + aybz;
- c1[7] += azbx - axbz;
+ c1[1] += axby + aybx;
+ c1[2] += axbz + azbx;
+ c1[3] += aybz - azby;
+ c1[6] += azby + aybz;
+ c1[7] += azbx - axbz;
c1[11] += axby - aybx;
- c2[1] += a.z + b.z;
- c2[2] -= a.y + b.y;
- c2[3] += a.x - b.x;
- c2[6] += a.x + b.x;
- c2[7] += a.y - b.y;
+ c2[1] += a.z + b.z;
+ c2[2] -= a.y + b.y;
+ c2[3] += a.x - b.x;
+ c2[6] += a.x + b.x;
+ c2[7] += a.y - b.y;
c2[11] += a.z - b.z;
source_it++;
target_it++;
}
- c1[4] = c1[1];
- c1[8] = c1[2];
- c1[9] = c1[6];
+ c1[4] = c1[1];
+ c1[8] = c1[2];
+ c1[9] = c1[6];
c1[12] = c1[3];
c1[13] = c1[7];
c1[14] = c1[11];
- c2[4] = -c2[1];
- c2[8] = -c2[2];
+ c2[4] = -c2[1];
+ c2[8] = -c2[2];
c2[12] = -c2[3];
- c2[9] = -c2[6];
+ c2[9] = -c2[6];
c2[13] = -c2[7];
c2[14] = -c2[11];
C1 *= -2.0;
C2 *= 2.0;
- const Eigen::Matrix<double, 4, 4> A = (0.25 / double (npts)) * C2.transpose () * C2 - C1;
+ const Eigen::Matrix<double, 4, 4> A =
+ (0.25 / double(npts)) * C2.transpose() * C2 - C1;
- const Eigen::EigenSolver<Eigen::Matrix<double, 4, 4> > es (A);
+ const Eigen::EigenSolver<Eigen::Matrix<double, 4, 4>> es(A);
ptrdiff_t i;
- es.eigenvalues ().real ().maxCoeff (&i);
- const Eigen::Matrix<double, 4, 1> qmat = es.eigenvectors ().col (i).real ();
- const Eigen::Matrix<double, 4, 1> smat = - (0.5 / double (npts)) * C2 * qmat;
+ es.eigenvalues().real().maxCoeff(&i);
+ const Eigen::Matrix<double, 4, 1> qmat = es.eigenvectors().col(i).real();
+ const Eigen::Matrix<double, 4, 1> smat = -(0.5 / double(npts)) * C2 * qmat;
- const Eigen::Quaternion<double> q (qmat (3), qmat (0), qmat (1), qmat (2));
- const Eigen::Quaternion<double> s (smat (3), smat (0), smat (1), smat (2));
+ const Eigen::Quaternion<double> q(qmat(3), qmat(0), qmat(1), qmat(2));
+ const Eigen::Quaternion<double> s(smat(3), smat(0), smat(1), smat(2));
- const Eigen::Quaternion<double> t = s * q.conjugate ();
+ const Eigen::Quaternion<double> t = s * q.conjugate();
- const Eigen::Matrix<double, 3, 3> R (q.toRotationMatrix ());
+ const Eigen::Matrix<double, 3, 3> R(q.toRotationMatrix());
for (int i = 0; i < 3; ++i)
for (int j = 0; j < 3; ++j)
- transformation_matrix (i, j) = R (i, j);
+ transformation_matrix(i, j) = R(i, j);
- transformation_matrix (0, 3) = - t.x ();
- transformation_matrix (1, 3) = - t.y ();
- transformation_matrix (2, 3) = - t.z ();
+ transformation_matrix(0, 3) = -t.x();
+ transformation_matrix(1, 3) = -t.y();
+ transformation_matrix(2, 3) = -t.z();
}
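// Closed form used above (dual-quaternion formulation, cf. Walker, Shao & Volz): the
// rotation quaternion q is the eigenvector of A = (1/(4n)) * C2^T * C2 - C1 with the
// largest eigenvalue, the dual part is s = -(1/(2n)) * C2 * q, and the translation is
// the negated vector part of s * conj(q). The solve is done in double precision
// regardless of the Scalar template parameter.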
} // namespace registration
} // namespace pcl
#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_DQ_HPP_ */
-
#ifndef PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_LM_HPP_
#define PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_LM_HPP_
-#include <pcl/registration/warp_point_rigid.h>
#include <pcl/registration/warp_point_rigid_6d.h>
-#include <pcl/registration/distances.h>
-#include <unsupported/Eigen/NonLinearOptimization>
+#include <unsupported/Eigen/NonLinearOptimization>
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointSource, typename PointTarget, typename MatScalar>
-pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::TransformationEstimationLM ()
- : tmp_src_ ()
- , tmp_tgt_ ()
- , tmp_idx_src_ ()
- , tmp_idx_tgt_ ()
- , warp_point_ (new WarpPointRigid6D<PointSource, PointTarget, MatScalar>)
-{
-};
+pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::
+ TransformationEstimationLM()
+: tmp_src_()
+, tmp_tgt_()
+, tmp_idx_src_()
+, tmp_idx_tgt_()
+, warp_point_(new WarpPointRigid6D<PointSource, PointTarget, MatScalar>){};
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> void
-pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+void
+pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
// <cloud_src,cloud_src> is the source dataset
- if (cloud_src.size () != cloud_tgt.size ())
- {
- PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] ");
+ if (cloud_src.size() != cloud_tgt.size()) {
+ PCL_ERROR("[pcl::registration::TransformationEstimationLM::"
+ "estimateRigidTransformation] ");
PCL_ERROR("Number or points in source (%zu) differs than target (%zu)!\n",
static_cast<std::size_t>(cloud_src.size()),
static_cast<std::size_t>(cloud_tgt.size()));
return;
}
- if (cloud_src.size () < 4) // need at least 4 samples
+ if (cloud_src.size() < 4) // need at least 4 samples
{
- PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] ");
+ PCL_ERROR("[pcl::registration::TransformationEstimationLM::"
+ "estimateRigidTransformation] ");
PCL_ERROR("Need at least 4 points to estimate a transform! Source and target have "
"%zu points!\n",
static_cast<std::size_t>(cloud_src.size()));
return;
}
- int n_unknowns = warp_point_->getDimension ();
- VectorX x (n_unknowns);
- x.setZero ();
-
+ int n_unknowns = warp_point_->getDimension();
+ VectorX x(n_unknowns);
+ x.setZero();
+
// Set temporary pointers
tmp_src_ = &cloud_src;
tmp_tgt_ = &cloud_tgt;
- OptimizationFunctor functor (static_cast<int> (cloud_src.size ()), this);
- Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
- //Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm (num_diff);
- Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, MatScalar> lm (num_diff);
- int info = lm.minimize (x);
+ OptimizationFunctor functor(static_cast<int>(cloud_src.size()), this);
+ Eigen::NumericalDiff<OptimizationFunctor> num_diff(functor);
+ // Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm
+ // (num_diff);
+ Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, MatScalar> lm(
+ num_diff);
+ int info = lm.minimize(x);
// Compute the norm of the residuals
- PCL_DEBUG ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation]");
- PCL_DEBUG ("LM solver finished with exit code %i, having a residual norm of %g. \n", info, lm.fvec.norm ());
- PCL_DEBUG ("Final solution: [%f", x[0]);
- for (int i = 1; i < n_unknowns; ++i)
- PCL_DEBUG (" %f", x[i]);
- PCL_DEBUG ("]\n");
+ PCL_DEBUG(
+ "[pcl::registration::TransformationEstimationLM::estimateRigidTransformation]");
+ PCL_DEBUG("LM solver finished with exit code %i, having a residual norm of %g. \n",
+ info,
+ lm.fvec.norm());
+ PCL_DEBUG("Final solution: [%f", x[0]);
+ for (int i = 1; i < n_unknowns; ++i)
+ PCL_DEBUG(" %f", x[i]);
+ PCL_DEBUG("]\n");
// Return the correct transformation
- warp_point_->setParam (x);
- transformation_matrix = warp_point_->getTransform ();
+ warp_point_->setParam(x);
+ transformation_matrix = warp_point_->getTransform();
tmp_src_ = nullptr;
tmp_tgt_ = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> void
-pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+void
+pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.size ())
- {
+ if (indices_src.size() != cloud_tgt.size()) {
PCL_ERROR(
"[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] "
"Number or points in source (%zu) differs than target (%zu)!\n",
        indices_src.size(),
        cloud_tgt.size());
    return;
  }
// <cloud_src,cloud_src> is the source dataset
- transformation_matrix.setIdentity ();
+ transformation_matrix.setIdentity();
- const auto nr_correspondences = cloud_tgt.size ();
- std::vector<int> indices_tgt;
+ const auto nr_correspondences = cloud_tgt.size();
+ pcl::Indices indices_tgt;
indices_tgt.resize(nr_correspondences);
for (std::size_t i = 0; i < nr_correspondences; ++i)
indices_tgt[i] = i;
- estimateRigidTransformation(cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
+ estimateRigidTransformation(
+ cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> inline void
-pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+inline void
+pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != indices_tgt.size ())
- {
- PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ if (indices_src.size() != indices_tgt.size()) {
+ PCL_ERROR(
+ "[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] "
+ "Number or points in source (%lu) differs than target (%lu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
- if (indices_src.size () < 4) // need at least 4 samples
+ if (indices_src.size() < 4) // need at least 4 samples
{
- PCL_ERROR ("[pcl::IterativeClosestPointNonLinear::estimateRigidTransformationLM] ");
- PCL_ERROR ("Need at least 4 points to estimate a transform! Source and target have %lu points!",
- indices_src.size ());
+ PCL_ERROR("[pcl::IterativeClosestPointNonLinear::estimateRigidTransformationLM] ");
+ PCL_ERROR("Need at least 4 points to estimate a transform! Source and target have "
+ "%lu points!",
+ indices_src.size());
return;
}
- int n_unknowns = warp_point_->getDimension (); // get dimension of unknown space
- VectorX x (n_unknowns);
- x.setConstant (n_unknowns, 0);
+ int n_unknowns = warp_point_->getDimension(); // get dimension of unknown space
+ VectorX x(n_unknowns);
+ x.setConstant(n_unknowns, 0);
// Set temporary pointers
tmp_src_ = &cloud_src;
tmp_idx_src_ = &indices_src;
tmp_idx_tgt_ = &indices_tgt;
- OptimizationFunctorWithIndices functor (static_cast<int> (indices_src.size ()), this);
- Eigen::NumericalDiff<OptimizationFunctorWithIndices> num_diff (functor);
- //Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctorWithIndices> > lm (num_diff);
- Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctorWithIndices>, MatScalar> lm (num_diff);
- int info = lm.minimize (x);
+ OptimizationFunctorWithIndices functor(static_cast<int>(indices_src.size()), this);
+ Eigen::NumericalDiff<OptimizationFunctorWithIndices> num_diff(functor);
+ // Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctorWithIndices> > lm
+ // (num_diff);
+ Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctorWithIndices>,
+ MatScalar>
+ lm(num_diff);
+ int info = lm.minimize(x);
// Compute the norm of the residuals
- PCL_DEBUG ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] LM solver finished with exit code %i, having a residual norm of %g. \n", info, lm.fvec.norm ());
- PCL_DEBUG ("Final solution: [%f", x[0]);
- for (int i = 1; i < n_unknowns; ++i)
- PCL_DEBUG (" %f", x[i]);
- PCL_DEBUG ("]\n");
+ PCL_DEBUG(
+ "[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] LM "
+ "solver finished with exit code %i, having a residual norm of %g. \n",
+ info,
+ lm.fvec.norm());
+ PCL_DEBUG("Final solution: [%f", x[0]);
+ for (int i = 1; i < n_unknowns; ++i)
+ PCL_DEBUG(" %f", x[i]);
+ PCL_DEBUG("]\n");
// Return the correct transformation
- warp_point_->setParam (x);
- transformation_matrix = warp_point_->getTransform ();
+ warp_point_->setParam(x);
+ transformation_matrix = warp_point_->getTransform();
tmp_src_ = nullptr;
tmp_tgt_ = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> inline void
-pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+inline void
+pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- const auto nr_correspondences = correspondences.size ();
- std::vector<int> indices_src (nr_correspondences);
- std::vector<int> indices_tgt (nr_correspondences);
- for (std::size_t i = 0; i < nr_correspondences; ++i)
- {
+ const auto nr_correspondences = correspondences.size();
+ pcl::Indices indices_src(nr_correspondences);
+ pcl::Indices indices_tgt(nr_correspondences);
+ for (std::size_t i = 0; i < nr_correspondences; ++i) {
indices_src[i] = correspondences[i].index_query;
indices_tgt[i] = correspondences[i].index_match;
}
- estimateRigidTransformation (cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
+ estimateRigidTransformation(
+ cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> int
-pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::OptimizationFunctor::operator () (
- const VectorX &x, VectorX &fvec) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+int
+pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::
+ OptimizationFunctor::operator()(const VectorX& x, VectorX& fvec) const
{
- const PointCloud<PointSource> & src_points = *estimator_->tmp_src_;
- const PointCloud<PointTarget> & tgt_points = *estimator_->tmp_tgt_;
+ const PointCloud<PointSource>& src_points = *estimator_->tmp_src_;
+ const PointCloud<PointTarget>& tgt_points = *estimator_->tmp_tgt_;
// Initialize the warp function with the given parameters
- estimator_->warp_point_->setParam (x);
+ estimator_->warp_point_->setParam(x);
- // Transform each source point and compute its distance to the corresponding target point
- for (int i = 0; i < values (); ++i)
- {
- const PointSource & p_src = src_points[i];
- const PointTarget & p_tgt = tgt_points[i];
+ // Transform each source point and compute its distance to the corresponding target
+ // point
+ for (int i = 0; i < values(); ++i) {
+ const PointSource& p_src = src_points[i];
+ const PointTarget& p_tgt = tgt_points[i];
// Transform the source point based on the current warp parameters
Vector4 p_src_warped;
- estimator_->warp_point_->warpPoint (p_src, p_src_warped);
+ estimator_->warp_point_->warpPoint(p_src, p_src_warped);
// Estimate the distance (cost function)
- fvec[i] = estimator_->computeDistance (p_src_warped, p_tgt);
+ fvec[i] = estimator_->computeDistance(p_src_warped, p_tgt);
}
return (0);
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> int
-pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::OptimizationFunctorWithIndices::operator() (
- const VectorX &x, VectorX &fvec) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+int
+pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::
+ OptimizationFunctorWithIndices::operator()(const VectorX& x, VectorX& fvec) const
{
- const PointCloud<PointSource> & src_points = *estimator_->tmp_src_;
- const PointCloud<PointTarget> & tgt_points = *estimator_->tmp_tgt_;
- const std::vector<int> & src_indices = *estimator_->tmp_idx_src_;
- const std::vector<int> & tgt_indices = *estimator_->tmp_idx_tgt_;
+ const PointCloud<PointSource>& src_points = *estimator_->tmp_src_;
+ const PointCloud<PointTarget>& tgt_points = *estimator_->tmp_tgt_;
+ const pcl::Indices& src_indices = *estimator_->tmp_idx_src_;
+ const pcl::Indices& tgt_indices = *estimator_->tmp_idx_tgt_;
// Initialize the warp function with the given parameters
- estimator_->warp_point_->setParam (x);
+ estimator_->warp_point_->setParam(x);
- // Transform each source point and compute its distance to the corresponding target point
- for (int i = 0; i < values (); ++i)
- {
- const PointSource & p_src = src_points[src_indices[i]];
- const PointTarget & p_tgt = tgt_points[tgt_indices[i]];
+ // Transform each source point and compute its distance to the corresponding target
+ // point
+ for (int i = 0; i < values(); ++i) {
+ const PointSource& p_src = src_points[src_indices[i]];
+ const PointTarget& p_tgt = tgt_points[tgt_indices[i]];
// Transform the source point based on the current warp parameters
Vector4 p_src_warped;
- estimator_->warp_point_->warpPoint (p_src, p_src_warped);
-
+ estimator_->warp_point_->warpPoint(p_src, p_src_warped);
+
// Estimate the distance (cost function)
- fvec[i] = estimator_->computeDistance (p_src_warped, p_tgt);
+ fvec[i] = estimator_->computeDistance(p_src_warped, p_tgt);
}
return (0);
}
-//#define PCL_INSTANTIATE_TransformationEstimationLM(T,U) template class PCL_EXPORTS pcl::registration::TransformationEstimationLM<T,U>;
+//#define PCL_INSTANTIATE_TransformationEstimationLM(T,U) template class PCL_EXPORTS
+// pcl::registration::TransformationEstimationLM<T,U>;
#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_LM_HPP_ */
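For orientation, the LM estimator above treats each correspondence as one residual, f_i(x) = computeDistance(warp(x) applied to the i-th source point, i-th target point), and lets Eigen's Levenberg-Marquardt driver (wrapped in Eigen::NumericalDiff for numerical Jacobians) minimize the sum of squared residuals over the warp parameters x; with the default WarpPointRigid6D there are six of them, three for translation and three for rotation. Below is a minimal usage sketch, assuming the default float scalar and two clouds that are already matched by index (source point i corresponds to target point i); the cloud contents and variable names are illustrative placeholders only.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/transformation_estimation_lm.h>

int
main()
{
  // Two clouds whose points are matched by index (at least four pairs are required).
  pcl::PointCloud<pcl::PointXYZ> src, tgt;
  src.resize(4);
  tgt.resize(4);
  // ... fill src[i] / tgt[i] with corresponding measurements ...

  pcl::registration::TransformationEstimationLM<pcl::PointXYZ, pcl::PointXYZ> est;
  // Matrix4 is Eigen::Matrix4f for the default float scalar.
  Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();
  est.estimateRigidTransformation(src, tgt, transform);
  // transform now holds the estimated rigid transform mapping src onto tgt
  // (it is left unchanged if the inputs were rejected, e.g. fewer than 4 points).
  return 0;
}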
#include <pcl/cloud_iterator.h>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = cloud_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = cloud_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR(
"[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] "
"Number or points in source (%zu) differs than target (%zu)!\n",
        nr_points,
        cloud_tgt.size());
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = indices_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = indices_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR(
"[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] "
"Number or points in source (%zu) differs than target (%zu)!\n",
        nr_points,
        cloud_tgt.size());
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = indices_src.size ();
- if (indices_tgt.size () != nr_points)
- {
+ const auto nr_points = indices_src.size();
+ if (indices_tgt.size() != nr_points) {
PCL_ERROR(
"[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] "
"Number or points in source (%zu) differs than target (%zu)!\n",
        nr_points,
        indices_tgt.size());
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-constructTransformationMatrix (const double & alpha, const double & beta, const double & gamma,
- const double & tx, const double & ty, const double & tz,
- Matrix4 &transformation_matrix) const
+ constructTransformationMatrix(const double& alpha,
+ const double& beta,
+ const double& gamma,
+ const double& tx,
+ const double& ty,
+ const double& tz,
+ Matrix4& transformation_matrix) const
{
// Construct the transformation matrix from rotation and translation
- transformation_matrix = Eigen::Matrix<Scalar, 4, 4>::Zero ();
- transformation_matrix (0, 0) = static_cast<Scalar> ( std::cos (gamma) * std::cos (beta));
- transformation_matrix (0, 1) = static_cast<Scalar> (-sin (gamma) * std::cos (alpha) + std::cos (gamma) * sin (beta) * sin (alpha));
- transformation_matrix (0, 2) = static_cast<Scalar> ( sin (gamma) * sin (alpha) + std::cos (gamma) * sin (beta) * std::cos (alpha));
- transformation_matrix (1, 0) = static_cast<Scalar> ( sin (gamma) * std::cos (beta));
- transformation_matrix (1, 1) = static_cast<Scalar> ( std::cos (gamma) * std::cos (alpha) + sin (gamma) * sin (beta) * sin (alpha));
- transformation_matrix (1, 2) = static_cast<Scalar> (-std::cos (gamma) * sin (alpha) + sin (gamma) * sin (beta) * std::cos (alpha));
- transformation_matrix (2, 0) = static_cast<Scalar> (-sin (beta));
- transformation_matrix (2, 1) = static_cast<Scalar> ( std::cos (beta) * sin (alpha));
- transformation_matrix (2, 2) = static_cast<Scalar> ( std::cos (beta) * std::cos (alpha));
-
- transformation_matrix (0, 3) = static_cast<Scalar> (tx);
- transformation_matrix (1, 3) = static_cast<Scalar> (ty);
- transformation_matrix (2, 3) = static_cast<Scalar> (tz);
- transformation_matrix (3, 3) = static_cast<Scalar> (1);
+ transformation_matrix = Eigen::Matrix<Scalar, 4, 4>::Zero();
+ transformation_matrix(0, 0) = static_cast<Scalar>(std::cos(gamma) * std::cos(beta));
+ transformation_matrix(0, 1) = static_cast<Scalar>(
+ -sin(gamma) * std::cos(alpha) + std::cos(gamma) * sin(beta) * sin(alpha));
+ transformation_matrix(0, 2) = static_cast<Scalar>(
+ sin(gamma) * sin(alpha) + std::cos(gamma) * sin(beta) * std::cos(alpha));
+ transformation_matrix(1, 0) = static_cast<Scalar>(sin(gamma) * std::cos(beta));
+ transformation_matrix(1, 1) = static_cast<Scalar>(
+ std::cos(gamma) * std::cos(alpha) + sin(gamma) * sin(beta) * sin(alpha));
+ transformation_matrix(1, 2) = static_cast<Scalar>(
+ -std::cos(gamma) * sin(alpha) + sin(gamma) * sin(beta) * std::cos(alpha));
+ transformation_matrix(2, 0) = static_cast<Scalar>(-sin(beta));
+ transformation_matrix(2, 1) = static_cast<Scalar>(std::cos(beta) * sin(alpha));
+ transformation_matrix(2, 2) = static_cast<Scalar>(std::cos(beta) * std::cos(alpha));
+
+ transformation_matrix(0, 3) = static_cast<Scalar>(tx);
+ transformation_matrix(1, 3) = static_cast<Scalar>(ty);
+ transformation_matrix(2, 3) = static_cast<Scalar>(tz);
+ transformation_matrix(3, 3) = static_cast<Scalar>(1);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it, ConstCloudIterator<PointTarget>& target_it, Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const
{
using Vector6d = Eigen::Matrix<double, 6, 1>;
using Matrix6d = Eigen::Matrix<double, 6, 6>;
Matrix6d ATA;
Vector6d ATb;
- ATA.setZero ();
- ATb.setZero ();
+ ATA.setZero();
+ ATb.setZero();
// Approximate as a linear least squares problem
- while (source_it.isValid () && target_it.isValid ())
- {
- if (!std::isfinite (source_it->x) ||
- !std::isfinite (source_it->y) ||
- !std::isfinite (source_it->z) ||
- !std::isfinite (target_it->x) ||
- !std::isfinite (target_it->y) ||
- !std::isfinite (target_it->z) ||
- !std::isfinite (target_it->normal_x) ||
- !std::isfinite (target_it->normal_y) ||
- !std::isfinite (target_it->normal_z))
- {
+ while (source_it.isValid() && target_it.isValid()) {
+ if (!std::isfinite(source_it->x) || !std::isfinite(source_it->y) ||
+ !std::isfinite(source_it->z) || !std::isfinite(target_it->x) ||
+ !std::isfinite(target_it->y) || !std::isfinite(target_it->z) ||
+ !std::isfinite(target_it->normal_x) || !std::isfinite(target_it->normal_y) ||
+ !std::isfinite(target_it->normal_z)) {
++target_it;
++source_it;
continue;
}
- const float & sx = source_it->x;
- const float & sy = source_it->y;
- const float & sz = source_it->z;
- const float & dx = target_it->x;
- const float & dy = target_it->y;
- const float & dz = target_it->z;
- const float & nx = target_it->normal[0];
- const float & ny = target_it->normal[1];
- const float & nz = target_it->normal[2];
+ const float& sx = source_it->x;
+ const float& sy = source_it->y;
+ const float& sz = source_it->z;
+ const float& dx = target_it->x;
+ const float& dy = target_it->y;
+ const float& dz = target_it->z;
+ const float& nx = target_it->normal[0];
+ const float& ny = target_it->normal[1];
+ const float& nz = target_it->normal[2];
- double a = nz*sy - ny*sz;
- double b = nx*sz - nz*sx;
- double c = ny*sx - nx*sy;
+ double a = nz * sy - ny * sz;
+ double b = nx * sz - nz * sx;
+ double c = ny * sx - nx * sy;
// 0 1 2 3 4 5
// 6 7 8 9 10 11
    //   12 13 14 15 16 17
    //   18 19 20 21 22 23
    //   24 25 26 27 28 29
// 30 31 32 33 34 35
- ATA.coeffRef (0) += a * a;
- ATA.coeffRef (1) += a * b;
- ATA.coeffRef (2) += a * c;
- ATA.coeffRef (3) += a * nx;
- ATA.coeffRef (4) += a * ny;
- ATA.coeffRef (5) += a * nz;
- ATA.coeffRef (7) += b * b;
- ATA.coeffRef (8) += b * c;
- ATA.coeffRef (9) += b * nx;
- ATA.coeffRef (10) += b * ny;
- ATA.coeffRef (11) += b * nz;
- ATA.coeffRef (14) += c * c;
- ATA.coeffRef (15) += c * nx;
- ATA.coeffRef (16) += c * ny;
- ATA.coeffRef (17) += c * nz;
- ATA.coeffRef (21) += nx * nx;
- ATA.coeffRef (22) += nx * ny;
- ATA.coeffRef (23) += nx * nz;
- ATA.coeffRef (28) += ny * ny;
- ATA.coeffRef (29) += ny * nz;
- ATA.coeffRef (35) += nz * nz;
-
- double d = nx*dx + ny*dy + nz*dz - nx*sx - ny*sy - nz*sz;
- ATb.coeffRef (0) += a * d;
- ATb.coeffRef (1) += b * d;
- ATb.coeffRef (2) += c * d;
- ATb.coeffRef (3) += nx * d;
- ATb.coeffRef (4) += ny * d;
- ATb.coeffRef (5) += nz * d;
+ ATA.coeffRef(0) += a * a;
+ ATA.coeffRef(1) += a * b;
+ ATA.coeffRef(2) += a * c;
+ ATA.coeffRef(3) += a * nx;
+ ATA.coeffRef(4) += a * ny;
+ ATA.coeffRef(5) += a * nz;
+ ATA.coeffRef(7) += b * b;
+ ATA.coeffRef(8) += b * c;
+ ATA.coeffRef(9) += b * nx;
+ ATA.coeffRef(10) += b * ny;
+ ATA.coeffRef(11) += b * nz;
+ ATA.coeffRef(14) += c * c;
+ ATA.coeffRef(15) += c * nx;
+ ATA.coeffRef(16) += c * ny;
+ ATA.coeffRef(17) += c * nz;
+ ATA.coeffRef(21) += nx * nx;
+ ATA.coeffRef(22) += nx * ny;
+ ATA.coeffRef(23) += nx * nz;
+ ATA.coeffRef(28) += ny * ny;
+ ATA.coeffRef(29) += ny * nz;
+ ATA.coeffRef(35) += nz * nz;
+
+ double d = nx * dx + ny * dy + nz * dz - nx * sx - ny * sy - nz * sz;
+ ATb.coeffRef(0) += a * d;
+ ATb.coeffRef(1) += b * d;
+ ATb.coeffRef(2) += c * d;
+ ATb.coeffRef(3) += nx * d;
+ ATb.coeffRef(4) += ny * d;
+ ATb.coeffRef(5) += nz * d;
++target_it;
++source_it;
}
- ATA.coeffRef (6) = ATA.coeff (1);
- ATA.coeffRef (12) = ATA.coeff (2);
- ATA.coeffRef (13) = ATA.coeff (8);
- ATA.coeffRef (18) = ATA.coeff (3);
- ATA.coeffRef (19) = ATA.coeff (9);
- ATA.coeffRef (20) = ATA.coeff (15);
- ATA.coeffRef (24) = ATA.coeff (4);
- ATA.coeffRef (25) = ATA.coeff (10);
- ATA.coeffRef (26) = ATA.coeff (16);
- ATA.coeffRef (27) = ATA.coeff (22);
- ATA.coeffRef (30) = ATA.coeff (5);
- ATA.coeffRef (31) = ATA.coeff (11);
- ATA.coeffRef (32) = ATA.coeff (17);
- ATA.coeffRef (33) = ATA.coeff (23);
- ATA.coeffRef (34) = ATA.coeff (29);
+ ATA.coeffRef(6) = ATA.coeff(1);
+ ATA.coeffRef(12) = ATA.coeff(2);
+ ATA.coeffRef(13) = ATA.coeff(8);
+ ATA.coeffRef(18) = ATA.coeff(3);
+ ATA.coeffRef(19) = ATA.coeff(9);
+ ATA.coeffRef(20) = ATA.coeff(15);
+ ATA.coeffRef(24) = ATA.coeff(4);
+ ATA.coeffRef(25) = ATA.coeff(10);
+ ATA.coeffRef(26) = ATA.coeff(16);
+ ATA.coeffRef(27) = ATA.coeff(22);
+ ATA.coeffRef(30) = ATA.coeff(5);
+ ATA.coeffRef(31) = ATA.coeff(11);
+ ATA.coeffRef(32) = ATA.coeff(17);
+ ATA.coeffRef(33) = ATA.coeff(23);
+ ATA.coeffRef(34) = ATA.coeff(29);
// Solve A*x = b
- Vector6d x = static_cast<Vector6d> (ATA.inverse () * ATb);
+ Vector6d x = static_cast<Vector6d>(ATA.inverse() * ATb);
// Construct the transformation matrix from x
- constructTransformationMatrix (x (0), x (1), x (2), x (3), x (4), x (5), transformation_matrix);
+ constructTransformationMatrix(
+ x(0), x(1), x(2), x(3), x(4), x(5), transformation_matrix);
}
} // namespace registration
} // namespace pcl
#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_POINT_TO_PLANE_LLS_HPP_ */
-
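The block of coeffRef() updates in estimateRigidTransformation(source_it, target_it, ...) above is the standard small-angle linearization of the point-to-plane error (valid when the remaining rotation is small, the regime in which ICP-style refinement operates). Sketching the algebra with the same names as the code, where s is the source point, (dx, dy, dz) the target point, n the target normal, and d the scalar formed in the loop:

$$
r \;=\; \big(R(\alpha,\beta,\gamma)\,\mathbf{s} + \mathbf{t} - \mathbf{p}_{\mathrm{tgt}}\big)\cdot\mathbf{n}
\;\approx\; a\,\alpha + b\,\beta + c\,\gamma + n_x t_x + n_y t_y + n_z t_z - d,
$$
$$
a = n_z s_y - n_y s_z,\qquad
b = n_x s_z - n_z s_x,\qquad
c = n_y s_x - n_x s_y,\qquad
d = \mathbf{n}\cdot(\mathbf{p}_{\mathrm{tgt}}-\mathbf{s}).
$$

Each valid correspondence therefore contributes one row (a, b, c, n_x, n_y, n_z) of A and one entry d of b in A x = b, with x = (alpha, beta, gamma, t_x, t_y, t_z)^T. The loop accumulates the upper triangle of A^T A (the numbered comment is the linear index map of the 6x6 matrix) together with A^T b, the lower triangle is mirrored afterwards, and x = (A^T A)^{-1} A^T b is handed to constructTransformationMatrix(), which rebuilds the 4x4 transform from the recovered Euler angles and translation.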
#include <pcl/cloud_iterator.h>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLSWeighted<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = cloud_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = cloud_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR("[pcl::TransformationEstimationPointToPlaneLLSWeighted::"
"estimateRigidTransformation] Number or points in source (%zu) differs "
"than target (%zu)!\n",
              nr_points,
              cloud_tgt.size());
    return;
}
- if (weights_.size () != nr_points)
- {
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLSWeighted::estimateRigidTransformation] Number or weights from the number of correspondences! Use setWeights () to set them.\n");
+ if (weights_.size() != nr_points) {
+ PCL_ERROR("[pcl::TransformationEstimationPointToPlaneLLSWeighted::"
+ "estimateRigidTransformation] Number or weights from the number of "
+ "correspondences! Use setWeights () to set them.\n");
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- typename std::vector<Scalar>::const_iterator weights_it = weights_.begin ();
- estimateRigidTransformation (source_it, target_it, weights_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ typename std::vector<Scalar>::const_iterator weights_it = weights_.begin();
+ estimateRigidTransformation(source_it, target_it, weights_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
TransformationEstimationPointToPlaneLLSWeighted<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const std::size_t nr_points = indices_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const std::size_t nr_points = indices_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR("[pcl::TransformationEstimationPointToPlaneLLSWeighted::"
"estimateRigidTransformation] Number or points in source (%zu) differs "
"than target (%zu)!\n",
              nr_points,
              cloud_tgt.size());
    return;
}
- if (weights_.size () != nr_points)
- {
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLSWeighted::estimateRigidTransformation] Number or weights from the number of correspondences! Use setWeights () to set them.\n");
+ if (weights_.size() != nr_points) {
+ PCL_ERROR("[pcl::TransformationEstimationPointToPlaneLLSWeighted::"
+ "estimateRigidTransformation] Number or weights from the number of "
+ "correspondences! Use setWeights () to set them.\n");
return;
}
-
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- typename std::vector<Scalar>::const_iterator weights_it = weights_.begin ();
- estimateRigidTransformation (source_it, target_it, weights_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ typename std::vector<Scalar>::const_iterator weights_it = weights_.begin();
+ estimateRigidTransformation(source_it, target_it, weights_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLSWeighted<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- const std::size_t nr_points = indices_src.size ();
- if (indices_tgt.size () != nr_points)
- {
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLSWeighted::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ const std::size_t nr_points = indices_src.size();
+ if (indices_tgt.size() != nr_points) {
+ PCL_ERROR("[pcl::TransformationEstimationPointToPlaneLLSWeighted::"
+ "estimateRigidTransformation] Number or points in source (%lu) differs "
+ "than target (%lu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
- if (weights_.size () != nr_points)
- {
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLSWeighted::estimateRigidTransformation] Number or weights from the number of correspondences! Use setWeights () to set them.\n");
+ if (weights_.size() != nr_points) {
+ PCL_ERROR("[pcl::TransformationEstimationPointToPlaneLLSWeighted::"
+ "estimateRigidTransformation] Number or weights from the number of "
+ "correspondences! Use setWeights () to set them.\n");
return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
- typename std::vector<Scalar>::const_iterator weights_it = weights_.begin ();
- estimateRigidTransformation (source_it, target_it, weights_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
+ typename std::vector<Scalar>::const_iterator weights_it = weights_.begin();
+ estimateRigidTransformation(source_it, target_it, weights_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLSWeighted<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
- std::vector<Scalar> weights (correspondences.size ());
- for (std::size_t i = 0; i < correspondences.size (); ++i)
+ ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
+ std::vector<Scalar> weights(correspondences.size());
+ for (std::size_t i = 0; i < correspondences.size(); ++i)
weights[i] = correspondences[i].weight;
- typename std::vector<Scalar>::const_iterator weights_it = weights.begin ();
- estimateRigidTransformation (source_it, target_it, weights_it, transformation_matrix);
+ typename std::vector<Scalar>::const_iterator weights_it = weights.begin();
+ estimateRigidTransformation(source_it, target_it, weights_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLSWeighted<PointSource, PointTarget, Scalar>::
-constructTransformationMatrix (const double & alpha, const double & beta, const double & gamma,
- const double & tx, const double & ty, const double & tz,
- Matrix4 &transformation_matrix) const
+ constructTransformationMatrix(const double& alpha,
+ const double& beta,
+ const double& gamma,
+ const double& tx,
+ const double& ty,
+ const double& tz,
+ Matrix4& transformation_matrix) const
{
// Construct the transformation matrix from rotation and translation
- transformation_matrix = Eigen::Matrix<Scalar, 4, 4>::Zero ();
- transformation_matrix (0, 0) = static_cast<Scalar> ( std::cos (gamma) * std::cos (beta));
- transformation_matrix (0, 1) = static_cast<Scalar> (-sin (gamma) * std::cos (alpha) + std::cos (gamma) * sin (beta) * sin (alpha));
- transformation_matrix (0, 2) = static_cast<Scalar> ( sin (gamma) * sin (alpha) + std::cos (gamma) * sin (beta) * std::cos (alpha));
- transformation_matrix (1, 0) = static_cast<Scalar> ( sin (gamma) * std::cos (beta));
- transformation_matrix (1, 1) = static_cast<Scalar> ( std::cos (gamma) * std::cos (alpha) + sin (gamma) * sin (beta) * sin (alpha));
- transformation_matrix (1, 2) = static_cast<Scalar> (-std::cos (gamma) * sin (alpha) + sin (gamma) * sin (beta) * std::cos (alpha));
- transformation_matrix (2, 0) = static_cast<Scalar> (-sin (beta));
- transformation_matrix (2, 1) = static_cast<Scalar> ( std::cos (beta) * sin (alpha));
- transformation_matrix (2, 2) = static_cast<Scalar> ( std::cos (beta) * std::cos (alpha));
-
- transformation_matrix (0, 3) = static_cast<Scalar> (tx);
- transformation_matrix (1, 3) = static_cast<Scalar> (ty);
- transformation_matrix (2, 3) = static_cast<Scalar> (tz);
- transformation_matrix (3, 3) = static_cast<Scalar> (1);
+ transformation_matrix = Eigen::Matrix<Scalar, 4, 4>::Zero();
+ transformation_matrix(0, 0) = static_cast<Scalar>(std::cos(gamma) * std::cos(beta));
+ transformation_matrix(0, 1) = static_cast<Scalar>(
+ -sin(gamma) * std::cos(alpha) + std::cos(gamma) * sin(beta) * sin(alpha));
+ transformation_matrix(0, 2) = static_cast<Scalar>(
+ sin(gamma) * sin(alpha) + std::cos(gamma) * sin(beta) * std::cos(alpha));
+ transformation_matrix(1, 0) = static_cast<Scalar>(sin(gamma) * std::cos(beta));
+ transformation_matrix(1, 1) = static_cast<Scalar>(
+ std::cos(gamma) * std::cos(alpha) + sin(gamma) * sin(beta) * sin(alpha));
+ transformation_matrix(1, 2) = static_cast<Scalar>(
+ -std::cos(gamma) * sin(alpha) + sin(gamma) * sin(beta) * std::cos(alpha));
+ transformation_matrix(2, 0) = static_cast<Scalar>(-sin(beta));
+ transformation_matrix(2, 1) = static_cast<Scalar>(std::cos(beta) * sin(alpha));
+ transformation_matrix(2, 2) = static_cast<Scalar>(std::cos(beta) * std::cos(alpha));
+
+ transformation_matrix(0, 3) = static_cast<Scalar>(tx);
+ transformation_matrix(1, 3) = static_cast<Scalar>(ty);
+ transformation_matrix(2, 3) = static_cast<Scalar>(tz);
+ transformation_matrix(3, 3) = static_cast<Scalar>(1);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationPointToPlaneLLSWeighted<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- typename std::vector<Scalar>::const_iterator& weights_it,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(
+ ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ typename std::vector<Scalar>::const_iterator& weights_it,
+ Matrix4& transformation_matrix) const
{
using Vector6d = Eigen::Matrix<double, 6, 1>;
using Matrix6d = Eigen::Matrix<double, 6, 6>;
Matrix6d ATA;
Vector6d ATb;
- ATA.setZero ();
- ATb.setZero ();
-
- while (source_it.isValid () && target_it.isValid ())
- {
- if (!std::isfinite (source_it->x) ||
- !std::isfinite (source_it->y) ||
- !std::isfinite (source_it->z) ||
- !std::isfinite (target_it->x) ||
- !std::isfinite (target_it->y) ||
- !std::isfinite (target_it->z) ||
- !std::isfinite (target_it->normal_x) ||
- !std::isfinite (target_it->normal_y) ||
- !std::isfinite (target_it->normal_z))
- {
- ++ source_it;
- ++ target_it;
- ++ weights_it;
+ ATA.setZero();
+ ATb.setZero();
+
+ while (source_it.isValid() && target_it.isValid()) {
+ if (!std::isfinite(source_it->x) || !std::isfinite(source_it->y) ||
+ !std::isfinite(source_it->z) || !std::isfinite(target_it->x) ||
+ !std::isfinite(target_it->y) || !std::isfinite(target_it->z) ||
+ !std::isfinite(target_it->normal_x) || !std::isfinite(target_it->normal_y) ||
+ !std::isfinite(target_it->normal_z)) {
+ ++source_it;
+ ++target_it;
+ ++weights_it;
continue;
}
- const float & sx = source_it->x;
- const float & sy = source_it->y;
- const float & sz = source_it->z;
- const float & dx = target_it->x;
- const float & dy = target_it->y;
- const float & dz = target_it->z;
- const float & nx = target_it->normal[0] * (*weights_it);
- const float & ny = target_it->normal[1] * (*weights_it);
- const float & nz = target_it->normal[2] * (*weights_it);
+ const float& sx = source_it->x;
+ const float& sy = source_it->y;
+ const float& sz = source_it->z;
+ const float& dx = target_it->x;
+ const float& dy = target_it->y;
+ const float& dz = target_it->z;
+ const float& nx = target_it->normal[0] * (*weights_it);
+ const float& ny = target_it->normal[1] * (*weights_it);
+ const float& nz = target_it->normal[2] * (*weights_it);
- double a = nz*sy - ny*sz;
- double b = nx*sz - nz*sx;
- double c = ny*sx - nx*sy;
+ double a = nz * sy - ny * sz;
+ double b = nx * sz - nz * sx;
+ double c = ny * sx - nx * sy;
// 0 1 2 3 4 5
// 6 7 8 9 10 11
    //   12 13 14 15 16 17
    //   18 19 20 21 22 23
    //   24 25 26 27 28 29
// 30 31 32 33 34 35
- ATA.coeffRef (0) += a * a;
- ATA.coeffRef (1) += a * b;
- ATA.coeffRef (2) += a * c;
- ATA.coeffRef (3) += a * nx;
- ATA.coeffRef (4) += a * ny;
- ATA.coeffRef (5) += a * nz;
- ATA.coeffRef (7) += b * b;
- ATA.coeffRef (8) += b * c;
- ATA.coeffRef (9) += b * nx;
- ATA.coeffRef (10) += b * ny;
- ATA.coeffRef (11) += b * nz;
- ATA.coeffRef (14) += c * c;
- ATA.coeffRef (15) += c * nx;
- ATA.coeffRef (16) += c * ny;
- ATA.coeffRef (17) += c * nz;
- ATA.coeffRef (21) += nx * nx;
- ATA.coeffRef (22) += nx * ny;
- ATA.coeffRef (23) += nx * nz;
- ATA.coeffRef (28) += ny * ny;
- ATA.coeffRef (29) += ny * nz;
- ATA.coeffRef (35) += nz * nz;
-
- double d = nx*dx + ny*dy + nz*dz - nx*sx - ny*sy - nz*sz;
- ATb.coeffRef (0) += a * d;
- ATb.coeffRef (1) += b * d;
- ATb.coeffRef (2) += c * d;
- ATb.coeffRef (3) += nx * d;
- ATb.coeffRef (4) += ny * d;
- ATb.coeffRef (5) += nz * d;
-
- ++ source_it;
- ++ target_it;
- ++ weights_it;
+ ATA.coeffRef(0) += a * a;
+ ATA.coeffRef(1) += a * b;
+ ATA.coeffRef(2) += a * c;
+ ATA.coeffRef(3) += a * nx;
+ ATA.coeffRef(4) += a * ny;
+ ATA.coeffRef(5) += a * nz;
+ ATA.coeffRef(7) += b * b;
+ ATA.coeffRef(8) += b * c;
+ ATA.coeffRef(9) += b * nx;
+ ATA.coeffRef(10) += b * ny;
+ ATA.coeffRef(11) += b * nz;
+ ATA.coeffRef(14) += c * c;
+ ATA.coeffRef(15) += c * nx;
+ ATA.coeffRef(16) += c * ny;
+ ATA.coeffRef(17) += c * nz;
+ ATA.coeffRef(21) += nx * nx;
+ ATA.coeffRef(22) += nx * ny;
+ ATA.coeffRef(23) += nx * nz;
+ ATA.coeffRef(28) += ny * ny;
+ ATA.coeffRef(29) += ny * nz;
+ ATA.coeffRef(35) += nz * nz;
+
+ double d = nx * dx + ny * dy + nz * dz - nx * sx - ny * sy - nz * sz;
+ ATb.coeffRef(0) += a * d;
+ ATb.coeffRef(1) += b * d;
+ ATb.coeffRef(2) += c * d;
+ ATb.coeffRef(3) += nx * d;
+ ATb.coeffRef(4) += ny * d;
+ ATb.coeffRef(5) += nz * d;
+
+ ++source_it;
+ ++target_it;
+ ++weights_it;
}
- ATA.coeffRef (6) = ATA.coeff (1);
- ATA.coeffRef (12) = ATA.coeff (2);
- ATA.coeffRef (13) = ATA.coeff (8);
- ATA.coeffRef (18) = ATA.coeff (3);
- ATA.coeffRef (19) = ATA.coeff (9);
- ATA.coeffRef (20) = ATA.coeff (15);
- ATA.coeffRef (24) = ATA.coeff (4);
- ATA.coeffRef (25) = ATA.coeff (10);
- ATA.coeffRef (26) = ATA.coeff (16);
- ATA.coeffRef (27) = ATA.coeff (22);
- ATA.coeffRef (30) = ATA.coeff (5);
- ATA.coeffRef (31) = ATA.coeff (11);
- ATA.coeffRef (32) = ATA.coeff (17);
- ATA.coeffRef (33) = ATA.coeff (23);
- ATA.coeffRef (34) = ATA.coeff (29);
+ ATA.coeffRef(6) = ATA.coeff(1);
+ ATA.coeffRef(12) = ATA.coeff(2);
+ ATA.coeffRef(13) = ATA.coeff(8);
+ ATA.coeffRef(18) = ATA.coeff(3);
+ ATA.coeffRef(19) = ATA.coeff(9);
+ ATA.coeffRef(20) = ATA.coeff(15);
+ ATA.coeffRef(24) = ATA.coeff(4);
+ ATA.coeffRef(25) = ATA.coeff(10);
+ ATA.coeffRef(26) = ATA.coeff(16);
+ ATA.coeffRef(27) = ATA.coeff(22);
+ ATA.coeffRef(30) = ATA.coeff(5);
+ ATA.coeffRef(31) = ATA.coeff(11);
+ ATA.coeffRef(32) = ATA.coeff(17);
+ ATA.coeffRef(33) = ATA.coeff(23);
+ ATA.coeffRef(34) = ATA.coeff(29);
// Solve A*x = b
- Vector6d x = static_cast<Vector6d> (ATA.inverse () * ATb);
+ Vector6d x = static_cast<Vector6d>(ATA.inverse() * ATb);
// Construct the transformation matrix from x
- constructTransformationMatrix (x (0), x (1), x (2), x (3), x (4), x (5), transformation_matrix);
+ constructTransformationMatrix(
+ x(0), x(1), x(2), x(3), x(4), x(5), transformation_matrix);
}
} // namespace registration
} // namespace pcl
-#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_POINT_TO_PLANE_LLS_WEIGHTED_HPP_ */
-
+#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_POINT_TO_PLANE_LLS_WEIGHTED_HPP_ \
+ */
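The weighted solver above differs from the plain point-to-plane LLS solver only in that the target normal is pre-multiplied by the per-correspondence weight (nx = normal[0] * (*weights_it), and likewise for ny, nz) before a, b, c and d are formed. Since every coefficient of that correspondence's row is built from the scaled normal, the whole row of A and the matching entry of b are scaled by w_i, so the closed-form solve minimizes the weighted objective

$$
\min_{\mathbf{x}} \sum_i \big(w_i\, r_i(\mathbf{x})\big)^2
$$

rather than the unweighted sum of squared point-to-plane residuals.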
#ifndef PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_POINT_TO_PLANE_WEIGHTED_HPP_
#define PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_POINT_TO_PLANE_WEIGHTED_HPP_
+#include <pcl/registration/distances.h>
#include <pcl/registration/warp_point_rigid.h>
#include <pcl/registration/warp_point_rigid_6d.h>
-#include <pcl/registration/distances.h>
-#include <unsupported/Eigen/NonLinearOptimization>
+#include <unsupported/Eigen/NonLinearOptimization>
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointSource, typename PointTarget, typename MatScalar>
-pcl::registration::TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::TransformationEstimationPointToPlaneWeighted ()
- : tmp_src_ ()
- , tmp_tgt_ ()
- , tmp_idx_src_ ()
- , tmp_idx_tgt_ ()
- , warp_point_ (new WarpPointRigid6D<PointSource, PointTarget, MatScalar>)
- , use_correspondence_weights_ (true)
-{
-};
+pcl::registration::TransformationEstimationPointToPlaneWeighted<
+ PointSource,
+ PointTarget,
+ MatScalar>::TransformationEstimationPointToPlaneWeighted()
+: tmp_src_()
+, tmp_tgt_()
+, tmp_idx_src_()
+, tmp_idx_tgt_()
+, warp_point_(new WarpPointRigid6D<PointSource, PointTarget, MatScalar>)
+, use_correspondence_weights_(true){};
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> void
-pcl::registration::TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+void
+pcl::registration::
+ TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
// <cloud_src,cloud_src> is the source dataset
- if (cloud_src.size () != cloud_tgt.size ())
- {
- PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
+ if (cloud_src.size() != cloud_tgt.size()) {
+ PCL_ERROR("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation] ");
PCL_ERROR("Number or points in source (%zu) differs than target (%zu)!\n",
static_cast<std::size_t>(cloud_src.size()),
static_cast<std::size_t>(cloud_tgt.size()));
return;
}
- if (cloud_src.size () < 4) // need at least 4 samples
+ if (cloud_src.size() < 4) // need at least 4 samples
{
- PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
+ PCL_ERROR("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation] ");
PCL_ERROR("Need at least 4 points to estimate a transform! Source and target have "
"%zu points!\n",
static_cast<std::size_t>(cloud_src.size()));
return;
}
- if (correspondence_weights_.size () != cloud_src.size ())
- {
- PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
+ if (correspondence_weights_.size() != cloud_src.size()) {
+ PCL_ERROR("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation] ");
PCL_ERROR("Number of weights (%zu) differs than number of points (%zu)!\n",
correspondence_weights_.size(),
static_cast<std::size_t>(cloud_src.size()));
return;
}
- int n_unknowns = warp_point_->getDimension ();
- VectorX x (n_unknowns);
- x.setZero ();
-
+ int n_unknowns = warp_point_->getDimension();
+ VectorX x(n_unknowns);
+ x.setZero();
+
// Set temporary pointers
tmp_src_ = &cloud_src;
tmp_tgt_ = &cloud_tgt;
- OptimizationFunctor functor (static_cast<int> (cloud_src.size ()), this);
- Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
- Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, MatScalar> lm (num_diff);
- int info = lm.minimize (x);
+ OptimizationFunctor functor(static_cast<int>(cloud_src.size()), this);
+ Eigen::NumericalDiff<OptimizationFunctor> num_diff(functor);
+ Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, MatScalar> lm(
+ num_diff);
+ int info = lm.minimize(x);
// Compute the norm of the residuals
- PCL_DEBUG ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation]");
- PCL_DEBUG ("LM solver finished with exit code %i, having a residual norm of %g. \n", info, lm.fvec.norm ());
- PCL_DEBUG ("Final solution: [%f", x[0]);
- for (int i = 1; i < n_unknowns; ++i)
- PCL_DEBUG (" %f", x[i]);
- PCL_DEBUG ("]\n");
+ PCL_DEBUG("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation]");
+ PCL_DEBUG("LM solver finished with exit code %i, having a residual norm of %g. \n",
+ info,
+ lm.fvec.norm());
+ PCL_DEBUG("Final solution: [%f", x[0]);
+ for (int i = 1; i < n_unknowns; ++i)
+ PCL_DEBUG(" %f", x[i]);
+ PCL_DEBUG("]\n");
// Return the correct transformation
- warp_point_->setParam (x);
- transformation_matrix = warp_point_->getTransform ();
+ warp_point_->setParam(x);
+ transformation_matrix = warp_point_->getTransform();
tmp_src_ = NULL;
tmp_tgt_ = NULL;
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> void
-pcl::registration::TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+void
+pcl::registration::
+ TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.size ())
- {
+ if (indices_src.size() != cloud_tgt.size()) {
PCL_ERROR("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
"estimateRigidTransformation] Number or points in source (%zu) differs "
"than target (%zu)!\n",
              indices_src.size(),
              cloud_tgt.size());
    return;
}
- if (correspondence_weights_.size () != indices_src.size ())
- {
- PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
+ if (correspondence_weights_.size() != indices_src.size()) {
+ PCL_ERROR("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation] ");
PCL_ERROR("Number of weights (%zu) differs than number of points (%zu)!\n",
correspondence_weights_.size(),
indices_src.size());
return;
}
-
// <cloud_src,cloud_src> is the source dataset
- transformation_matrix.setIdentity ();
+ transformation_matrix.setIdentity();
- const auto nr_correspondences = cloud_tgt.size ();
- std::vector<int> indices_tgt;
+ const auto nr_correspondences = cloud_tgt.size();
+ pcl::Indices indices_tgt;
indices_tgt.resize(nr_correspondences);
for (std::size_t i = 0; i < nr_correspondences; ++i)
indices_tgt[i] = i;
- estimateRigidTransformation(cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
+ estimateRigidTransformation(
+ cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> inline void
-pcl::registration::TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+inline void
+pcl::registration::
+ TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != indices_tgt.size ())
- {
- PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ if (indices_src.size() != indices_tgt.size()) {
+ PCL_ERROR("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation] Number or points in source (%lu) differs "
+ "than target (%lu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
- if (indices_src.size () < 4) // need at least 4 samples
+ if (indices_src.size() < 4) // need at least 4 samples
{
- PCL_ERROR ("[pcl::IterativeClosestPointNonLinear::estimateRigidTransformationLM] ");
- PCL_ERROR ("Need at least 4 points to estimate a transform! Source and target have %lu points!",
- indices_src.size ());
+ PCL_ERROR("[pcl::IterativeClosestPointNonLinear::estimateRigidTransformationLM] ");
+ PCL_ERROR("Need at least 4 points to estimate a transform! Source and target have "
+ "%lu points!\n",
+ indices_src.size());
return;
}
- if (correspondence_weights_.size () != indices_src.size ())
- {
- PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
- PCL_ERROR ("Number of weights (%lu) differs than number of points (%lu)!\n",
- correspondence_weights_.size (), indices_src.size ());
+ if (correspondence_weights_.size() != indices_src.size()) {
+ PCL_ERROR("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation] ");
+ PCL_ERROR("Number of weights (%lu) differs than number of points (%lu)!\n",
+ correspondence_weights_.size(),
+ indices_src.size());
return;
}
-
- int n_unknowns = warp_point_->getDimension (); // get dimension of unknown space
- VectorX x (n_unknowns);
- x.setConstant (n_unknowns, 0);
+ int n_unknowns = warp_point_->getDimension(); // get dimension of unknown space
+ VectorX x(n_unknowns);
+ x.setConstant(n_unknowns, 0);
// Set temporary pointers
tmp_src_ = &cloud_src;
tmp_idx_src_ = &indices_src;
tmp_idx_tgt_ = &indices_tgt;
- OptimizationFunctorWithIndices functor (static_cast<int> (indices_src.size ()), this);
- Eigen::NumericalDiff<OptimizationFunctorWithIndices> num_diff (functor);
- Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctorWithIndices>, MatScalar> lm (num_diff);
- int info = lm.minimize (x);
+ OptimizationFunctorWithIndices functor(static_cast<int>(indices_src.size()), this);
+ Eigen::NumericalDiff<OptimizationFunctorWithIndices> num_diff(functor);
+ Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctorWithIndices>,
+ MatScalar>
+ lm(num_diff);
+ int info = lm.minimize(x);
// Compute the norm of the residuals
- PCL_DEBUG ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] LM solver finished with exit code %i, having a residual norm of %g. \n", info, lm.fvec.norm ());
- PCL_DEBUG ("Final solution: [%f", x[0]);
- for (int i = 1; i < n_unknowns; ++i)
- PCL_DEBUG (" %f", x[i]);
- PCL_DEBUG ("]\n");
+ PCL_DEBUG("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation] LM solver finished with exit code %i, having "
+ "a residual norm of %g. \n",
+ info,
+ lm.fvec.norm());
+ PCL_DEBUG("Final solution: [%f", x[0]);
+ for (int i = 1; i < n_unknowns; ++i)
+ PCL_DEBUG(" %f", x[i]);
+ PCL_DEBUG("]\n");
// Return the correct transformation
- warp_point_->setParam (x);
- transformation_matrix = warp_point_->getTransform ();
+ warp_point_->setParam(x);
+ transformation_matrix = warp_point_->getTransform();
tmp_src_ = NULL;
tmp_tgt_ = NULL;
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> inline void
-pcl::registration::TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+inline void
+pcl::registration::
+ TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
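+  // Split the correspondences into per-cloud index lists, optionally copy their
+  // weights, and delegate to the index-based overload.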
- const int nr_correspondences = static_cast<int> (correspondences.size ());
- std::vector<int> indices_src (nr_correspondences);
- std::vector<int> indices_tgt (nr_correspondences);
- for (int i = 0; i < nr_correspondences; ++i)
- {
+ const int nr_correspondences = static_cast<int>(correspondences.size());
+ pcl::Indices indices_src(nr_correspondences);
+ pcl::Indices indices_tgt(nr_correspondences);
+ for (int i = 0; i < nr_correspondences; ++i) {
indices_src[i] = correspondences[i].index_query;
indices_tgt[i] = correspondences[i].index_match;
}
- if (use_correspondence_weights_)
- {
- correspondence_weights_.resize (nr_correspondences);
+ if (use_correspondence_weights_) {
+ correspondence_weights_.resize(nr_correspondences);
for (std::size_t i = 0; i < nr_correspondences; ++i)
correspondence_weights_[i] = correspondences[i].weight;
}
- estimateRigidTransformation (cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
+ estimateRigidTransformation(
+ cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> int
-pcl::registration::TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::OptimizationFunctor::operator () (
- const VectorX &x, VectorX &fvec) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+int
+pcl::registration::TransformationEstimationPointToPlaneWeighted<
+ PointSource,
+ PointTarget,
+ MatScalar>::OptimizationFunctor::operator()(const VectorX& x, VectorX& fvec) const
{
- const PointCloud<PointSource> & src_points = *estimator_->tmp_src_;
- const PointCloud<PointTarget> & tgt_points = *estimator_->tmp_tgt_;
+ const PointCloud<PointSource>& src_points = *estimator_->tmp_src_;
+ const PointCloud<PointTarget>& tgt_points = *estimator_->tmp_tgt_;
// Initialize the warp function with the given parameters
- estimator_->warp_point_->setParam (x);
+ estimator_->warp_point_->setParam(x);
- // Transform each source point and compute its distance to the corresponding target point
- for (int i = 0; i < values (); ++i)
- {
- const PointSource & p_src = src_points[i];
- const PointTarget & p_tgt = tgt_points[i];
+ // Transform each source point and compute its distance to the corresponding target
+ // point
+ for (int i = 0; i < values(); ++i) {
+ const PointSource& p_src = src_points[i];
+ const PointTarget& p_tgt = tgt_points[i];
// Transform the source point based on the current warp parameters
Vector4 p_src_warped;
- estimator_->warp_point_->warpPoint (p_src, p_src_warped);
+ estimator_->warp_point_->warpPoint(p_src, p_src_warped);
// Estimate the distance (cost function)
- fvec[i] = estimator_->correspondence_weights_[i] * estimator_->computeDistance (p_src_warped, p_tgt);
+ fvec[i] = estimator_->correspondence_weights_[i] *
+ estimator_->computeDistance(p_src_warped, p_tgt);
}
return (0);
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename MatScalar> int
-pcl::registration::TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar>::OptimizationFunctorWithIndices::operator() (
- const VectorX &x, VectorX &fvec) const
+template <typename PointSource, typename PointTarget, typename MatScalar>
+int
+pcl::registration::TransformationEstimationPointToPlaneWeighted<
+ PointSource,
+ PointTarget,
+ MatScalar>::OptimizationFunctorWithIndices::operator()(const VectorX& x,
+ VectorX& fvec) const
{
- const PointCloud<PointSource> & src_points = *estimator_->tmp_src_;
- const PointCloud<PointTarget> & tgt_points = *estimator_->tmp_tgt_;
- const std::vector<int> & src_indices = *estimator_->tmp_idx_src_;
- const std::vector<int> & tgt_indices = *estimator_->tmp_idx_tgt_;
+ const PointCloud<PointSource>& src_points = *estimator_->tmp_src_;
+ const PointCloud<PointTarget>& tgt_points = *estimator_->tmp_tgt_;
+ const pcl::Indices& src_indices = *estimator_->tmp_idx_src_;
+ const pcl::Indices& tgt_indices = *estimator_->tmp_idx_tgt_;
// Initialize the warp function with the given parameters
- estimator_->warp_point_->setParam (x);
+ estimator_->warp_point_->setParam(x);
- // Transform each source point and compute its distance to the corresponding target point
- for (int i = 0; i < values (); ++i)
- {
- const PointSource & p_src = src_points[src_indices[i]];
- const PointTarget & p_tgt = tgt_points[tgt_indices[i]];
+ // Transform each source point and compute its distance to the corresponding target
+ // point
+ for (int i = 0; i < values(); ++i) {
+ const PointSource& p_src = src_points[src_indices[i]];
+ const PointTarget& p_tgt = tgt_points[tgt_indices[i]];
// Transform the source point based on the current warp parameters
Vector4 p_src_warped;
- estimator_->warp_point_->warpPoint (p_src, p_src_warped);
-
+ estimator_->warp_point_->warpPoint(p_src, p_src_warped);
+
// Estimate the distance (cost function)
- fvec[i] = estimator_->correspondence_weights_[i] * estimator_->computeDistance (p_src_warped, p_tgt);
+ fvec[i] = estimator_->correspondence_weights_[i] *
+ estimator_->computeDistance(p_src_warped, p_tgt);
}
return (0);
}
#include <pcl/common/eigen.h>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationSVD<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = cloud_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = cloud_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR("[pcl::TransformationEstimationSVD::estimateRigidTransformation] Number "
"or points in source (%zu) differs than target (%zu)!\n",
static_cast<std::size_t>(nr_points),
              static_cast<std::size_t>(cloud_tgt.size()));
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimationSVD<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.size ())
- {
+ if (indices_src.size() != cloud_tgt.size()) {
PCL_ERROR("[pcl::TransformationSVD::estimateRigidTransformation] Number or points "
"in source (%zu) differs than target (%zu)!\n",
indices_src.size(),
              cloud_tgt.size());
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationSVD<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- if (indices_src.size () != indices_tgt.size ())
- {
+ if (indices_src.size() != indices_tgt.size()) {
PCL_ERROR("[pcl::TransformationEstimationSVD::estimateRigidTransformation] Number "
"or points in source (%zu) differs than target (%zu)!\n",
indices_src.size(),
              indices_tgt.size());
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimationSVD<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
-TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
- ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
+TransformationEstimationSVD<PointSource, PointTarget, Scalar>::
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const
{
// Convert to Eigen format
- const int npts = static_cast <int> (source_it.size ());
-
-
+ const int npts = static_cast<int>(source_it.size());
- if (use_umeyama_)
- {
- Eigen::Matrix<Scalar, 3, Eigen::Dynamic> cloud_src (3, npts);
- Eigen::Matrix<Scalar, 3, Eigen::Dynamic> cloud_tgt (3, npts);
+ if (use_umeyama_) {
+ Eigen::Matrix<Scalar, 3, Eigen::Dynamic> cloud_src(3, npts);
+ Eigen::Matrix<Scalar, 3, Eigen::Dynamic> cloud_tgt(3, npts);
- for (int i = 0; i < npts; ++i)
- {
- cloud_src (0, i) = source_it->x;
- cloud_src (1, i) = source_it->y;
- cloud_src (2, i) = source_it->z;
+ for (int i = 0; i < npts; ++i) {
+ cloud_src(0, i) = source_it->x;
+ cloud_src(1, i) = source_it->y;
+ cloud_src(2, i) = source_it->z;
++source_it;
- cloud_tgt (0, i) = target_it->x;
- cloud_tgt (1, i) = target_it->y;
- cloud_tgt (2, i) = target_it->z;
+ cloud_tgt(0, i) = target_it->x;
+ cloud_tgt(1, i) = target_it->y;
+ cloud_tgt(2, i) = target_it->z;
++target_it;
}
// Call Umeyama directly from Eigen (PCL patched version until Eigen is released)
- transformation_matrix = pcl::umeyama (cloud_src, cloud_tgt, false);
+ transformation_matrix = pcl::umeyama(cloud_src, cloud_tgt, false);
}
- else
- {
- source_it.reset (); target_it.reset ();
+ else {
+ source_it.reset();
+ target_it.reset();
// <cloud_src,cloud_src> is the source dataset
- transformation_matrix.setIdentity ();
+ transformation_matrix.setIdentity();
Eigen::Matrix<Scalar, 4, 1> centroid_src, centroid_tgt;
// Estimate the centroids of source, target
- compute3DCentroid (source_it, centroid_src);
- compute3DCentroid (target_it, centroid_tgt);
- source_it.reset (); target_it.reset ();
+ compute3DCentroid(source_it, centroid_src);
+ compute3DCentroid(target_it, centroid_tgt);
+ source_it.reset();
+ target_it.reset();
// Subtract the centroids from source, target
- Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> cloud_src_demean, cloud_tgt_demean;
- demeanPointCloud (source_it, centroid_src, cloud_src_demean);
- demeanPointCloud (target_it, centroid_tgt, cloud_tgt_demean);
-
- getTransformationFromCorrelation (cloud_src_demean, centroid_src, cloud_tgt_demean, centroid_tgt, transformation_matrix);
+ Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> cloud_src_demean,
+ cloud_tgt_demean;
+ demeanPointCloud(source_it, centroid_src, cloud_src_demean);
+ demeanPointCloud(target_it, centroid_tgt, cloud_tgt_demean);
+
+ getTransformationFromCorrelation(cloud_src_demean,
+ centroid_src,
+ cloud_tgt_demean,
+ centroid_tgt,
+ transformation_matrix);
}
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimationSVD<PointSource, PointTarget, Scalar>::getTransformationFromCorrelation (
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_src_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_src,
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_tgt_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_tgt,
- Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimationSVD<PointSource, PointTarget, Scalar>::
+ getTransformationFromCorrelation(
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_src_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_src,
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_tgt_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_tgt,
+ Matrix4& transformation_matrix) const
{
- transformation_matrix.setIdentity ();
+ transformation_matrix.setIdentity();
// Assemble the correlation matrix H = source * target'
- Eigen::Matrix<Scalar, 3, 3> H = (cloud_src_demean * cloud_tgt_demean.transpose ()).topLeftCorner (3, 3);
+ Eigen::Matrix<Scalar, 3, 3> H =
+ (cloud_src_demean * cloud_tgt_demean.transpose()).topLeftCorner(3, 3);
// Compute the Singular Value Decomposition
- Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3> > svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);
- Eigen::Matrix<Scalar, 3, 3> u = svd.matrixU ();
- Eigen::Matrix<Scalar, 3, 3> v = svd.matrixV ();
+ Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3>> svd(
+ H, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Eigen::Matrix<Scalar, 3, 3> u = svd.matrixU();
+ Eigen::Matrix<Scalar, 3, 3> v = svd.matrixV();
// Compute R = V * U'
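+  // If det(U) * det(V) < 0 the SVD yields a reflection; flip the sign of V's last
+  // column so that R is a proper rotation.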
- if (u.determinant () * v.determinant () < 0)
- {
+ if (u.determinant() * v.determinant() < 0) {
for (int x = 0; x < 3; ++x)
- v (x, 2) *= -1;
+ v(x, 2) *= -1;
}
- Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose ();
+ Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose();
// Return the correct transformation
- transformation_matrix.topLeftCorner (3, 3) = R;
- const Eigen::Matrix<Scalar, 3, 1> Rc (R * centroid_src.head (3));
- transformation_matrix.block (0, 3, 3, 1) = centroid_tgt.head (3) - Rc;
+ transformation_matrix.topLeftCorner(3, 3) = R;
+ const Eigen::Matrix<Scalar, 3, 1> Rc(R * centroid_src.head(3));
+ transformation_matrix.block(0, 3, 3, 1) = centroid_tgt.head(3) - Rc;
}
} // namespace registration
} // namespace pcl
-//#define PCL_INSTANTIATE_TransformationEstimationSVD(T,U) template class PCL_EXPORTS pcl::registration::TransformationEstimationSVD<T,U>;
+//#define PCL_INSTANTIATE_TransformationEstimationSVD(T,U) template class PCL_EXPORTS
+// pcl::registration::TransformationEstimationSVD<T,U>;
#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_SVD_HPP_ */
-
#ifndef PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_SVD_SCALE_HPP_
#define PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_SVD_SCALE_HPP_
-
-namespace pcl
-{
-
-namespace registration
-{
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
-TransformationEstimationSVDScale<PointSource, PointTarget, Scalar>::getTransformationFromCorrelation (
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_src_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_src,
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_tgt_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_tgt,
- Matrix4 &transformation_matrix) const
+namespace pcl {
+
+namespace registration {
+
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+TransformationEstimationSVDScale<PointSource, PointTarget, Scalar>::
+ getTransformationFromCorrelation(
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_src_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_src,
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_tgt_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_tgt,
+ Matrix4& transformation_matrix) const
{
- transformation_matrix.setIdentity ();
+ transformation_matrix.setIdentity();
// Assemble the correlation matrix H = source * target'
- Eigen::Matrix<Scalar, 3, 3> H = (cloud_src_demean * cloud_tgt_demean.transpose ()).topLeftCorner (3, 3);
+ Eigen::Matrix<Scalar, 3, 3> H =
+ (cloud_src_demean * cloud_tgt_demean.transpose()).topLeftCorner(3, 3);
// Compute the Singular Value Decomposition
- Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3> > svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);
- Eigen::Matrix<Scalar, 3, 3> u = svd.matrixU ();
- Eigen::Matrix<Scalar, 3, 3> v = svd.matrixV ();
+ Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3>> svd(
+ H, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Eigen::Matrix<Scalar, 3, 3> u = svd.matrixU();
+ Eigen::Matrix<Scalar, 3, 3> v = svd.matrixV();
// Compute R = V * U'
- if (u.determinant () * v.determinant () < 0)
- {
+ if (u.determinant() * v.determinant() < 0) {
for (int x = 0; x < 3; ++x)
- v (x, 2) *= -1;
+ v(x, 2) *= -1;
}
- Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose ();
+ Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose();
// rotated cloud
Eigen::Matrix<Scalar, 4, 4> R4;
- R4.block (0, 0, 3, 3) = R;
- R4 (0, 3) = 0;
- R4 (1, 3) = 0;
- R4 (2, 3) = 0;
- R4 (3, 3) = 1;
+ R4.block(0, 0, 3, 3) = R;
+ R4(0, 3) = 0;
+ R4(1, 3) = 0;
+ R4(2, 3) = 0;
+ R4(3, 3) = 1;
Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> src_ = R4 * cloud_src_demean;
double sum_ss = 0.0f, sum_tt = 0.0f;
- for (unsigned corrIdx = 0; corrIdx < cloud_src_demean.cols (); ++corrIdx)
- {
- sum_ss += cloud_src_demean (0, corrIdx) * cloud_src_demean (0, corrIdx);
- sum_ss += cloud_src_demean (1, corrIdx) * cloud_src_demean (1, corrIdx);
- sum_ss += cloud_src_demean (2, corrIdx) * cloud_src_demean (2, corrIdx);
-
- sum_tt += cloud_tgt_demean (0, corrIdx) * src_ (0, corrIdx);
- sum_tt += cloud_tgt_demean (1, corrIdx) * src_ (1, corrIdx);
- sum_tt += cloud_tgt_demean (2, corrIdx) * src_ (2, corrIdx);
+ for (unsigned corrIdx = 0; corrIdx < cloud_src_demean.cols(); ++corrIdx) {
+ sum_ss += cloud_src_demean(0, corrIdx) * cloud_src_demean(0, corrIdx);
+ sum_ss += cloud_src_demean(1, corrIdx) * cloud_src_demean(1, corrIdx);
+ sum_ss += cloud_src_demean(2, corrIdx) * cloud_src_demean(2, corrIdx);
+
+ sum_tt += cloud_tgt_demean(0, corrIdx) * src_(0, corrIdx);
+ sum_tt += cloud_tgt_demean(1, corrIdx) * src_(1, corrIdx);
+ sum_tt += cloud_tgt_demean(2, corrIdx) * src_(2, corrIdx);
}
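+  // Least-squares estimate of the uniform scale: ratio of the target/rotated-source
+  // cross term to the source self term.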
float scale = sum_tt / sum_ss;
- transformation_matrix.topLeftCorner (3, 3) = scale * R;
- const Eigen::Matrix<Scalar, 3, 1> Rc (scale * R * centroid_src.head (3));
- transformation_matrix.block (0, 3, 3, 1) = centroid_tgt. head (3) - Rc;
+ transformation_matrix.topLeftCorner(3, 3) = scale * R;
+ const Eigen::Matrix<Scalar, 3, 1> Rc(scale * R * centroid_src.head(3));
+ transformation_matrix.block(0, 3, 3, 1) = centroid_tgt.head(3) - Rc;
}
} // namespace registration
} // namespace pcl
-//#define PCL_INSTANTIATE_TransformationEstimationSVD(T,U) template class PCL_EXPORTS pcl::registration::TransformationEstimationSVD<T,U>;
+//#define PCL_INSTANTIATE_TransformationEstimationSVD(T,U) template class PCL_EXPORTS
+// pcl::registration::TransformationEstimationSVD<T,U>;
#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_SVD_SCALE_HPP_ */
#include <pcl/cloud_iterator.h>
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = cloud_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = cloud_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::"
"estimateRigidTransformation] Number or points in source (%zu) differs "
"from target (%zu)!\n",
              nr_points,
              cloud_tgt.size());
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> void
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = indices_src.size ();
- if (cloud_tgt.size () != nr_points)
- {
+ const auto nr_points = indices_src.size();
+ if (cloud_tgt.size() != nr_points) {
PCL_ERROR("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::"
"estimateRigidTransformation] Number or points in source (%zu) differs "
"than target (%zu)!\n",
              nr_points,
              cloud_tgt.size());
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const
{
- const auto nr_points = indices_src.size ();
- if (indices_tgt.size () != nr_points)
- {
+ const auto nr_points = indices_src.size();
+ if (indices_tgt.size() != nr_points) {
PCL_ERROR("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::"
"estimateRigidTransformation] Number or points in source (%zu) differs "
"than target (%zu)!\n",
              nr_points,
              indices_tgt.size());
    return;
}
- ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const
{
- ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
- estimateRigidTransformation (source_it, target_it, transformation_matrix);
+ ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
+ ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
+ estimateRigidTransformation(source_it, target_it, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-constructTransformationMatrix (const Vector6 ¶meters,
- Matrix4 &transformation_matrix) const
+ constructTransformationMatrix(const Vector6& parameters,
+ Matrix4& transformation_matrix) const
{
// Construct the transformation matrix from rotation and translation
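+  // Symmetric formulation: the estimated rotation is applied on both sides of the
+  // translation, i.e. T = (Rz * Ry * Rx) * t * (Rz * Ry * Rx).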
- const Eigen::AngleAxis<Scalar> rotation_z (parameters (2), Eigen::Matrix<Scalar, 3, 1>::UnitZ ());
- const Eigen::AngleAxis<Scalar> rotation_y (parameters (1), Eigen::Matrix<Scalar, 3, 1>::UnitY ());
- const Eigen::AngleAxis<Scalar> rotation_x (parameters (0), Eigen::Matrix<Scalar, 3, 1>::UnitX ());
- const Eigen::Translation<Scalar, 3> translation (parameters (3), parameters (4), parameters (5));
- const Eigen::Transform<Scalar, 3, Eigen::Affine> transform = rotation_z * rotation_y * rotation_x *
- translation *
- rotation_z * rotation_y * rotation_x;
- transformation_matrix = transform.matrix ();
+ const Eigen::AngleAxis<Scalar> rotation_z(parameters(2),
+ Eigen::Matrix<Scalar, 3, 1>::UnitZ());
+ const Eigen::AngleAxis<Scalar> rotation_y(parameters(1),
+ Eigen::Matrix<Scalar, 3, 1>::UnitY());
+ const Eigen::AngleAxis<Scalar> rotation_x(parameters(0),
+ Eigen::Matrix<Scalar, 3, 1>::UnitX());
+ const Eigen::Translation<Scalar, 3> translation(
+ parameters(3), parameters(4), parameters(5));
+ const Eigen::Transform<Scalar, 3, Eigen::Affine> transform =
+ rotation_z * rotation_y * rotation_x * translation * rotation_z * rotation_y *
+ rotation_x;
+ transformation_matrix = transform.matrix();
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it, ConstCloudIterator<PointTarget>& target_it, Matrix4 &transformation_matrix) const
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const
{
using Matrix6 = Eigen::Matrix<Scalar, 6, 6>;
using Vector3 = Eigen::Matrix<Scalar, 3, 1>;
Matrix6 ATA;
Vector6 ATb;
- ATA.setZero ();
- ATb.setZero ();
- auto M = ATA.template selfadjointView<Eigen::Upper> ();
+ ATA.setZero();
+ ATb.setZero();
+ auto M = ATA.template selfadjointView<Eigen::Upper>();
// Approximate as a linear least squares problem
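+  // Each valid correspondence (p, q) with combined normal n contributes a row
+  // v = [(p + q) x n, n]: ATA is accumulated via symmetric rank-1 updates of v and
+  // ATb gathers v * ((q - p) . n).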
- source_it.reset ();
- target_it.reset ();
- for (; source_it.isValid () && target_it.isValid (); ++source_it, ++target_it)
- {
- const Vector3 p (source_it->x, source_it->y, source_it->z);
- const Vector3 q (target_it->x, target_it->y, target_it->z);
- const Vector3 n1 (source_it->getNormalVector3fMap().template cast<Scalar>());
- const Vector3 n2 (target_it->getNormalVector3fMap().template cast<Scalar>());
+ source_it.reset();
+ target_it.reset();
+ for (; source_it.isValid() && target_it.isValid(); ++source_it, ++target_it) {
+ const Vector3 p(source_it->x, source_it->y, source_it->z);
+ const Vector3 q(target_it->x, target_it->y, target_it->z);
+ const Vector3 n1(source_it->getNormalVector3fMap().template cast<Scalar>());
+ const Vector3 n2(target_it->getNormalVector3fMap().template cast<Scalar>());
Vector3 n;
- if (enforce_same_direction_normals_)
- {
- if (n1.dot (n2) >= 0.)
- n = n1 + n2;
- else
- n = n1 - n2;
- }
- else
- {
+ if (enforce_same_direction_normals_) {
+ if (n1.dot(n2) >= 0.)
n = n1 + n2;
+ else
+ n = n1 - n2;
+ }
+ else {
+ n = n1 + n2;
}
- if (!p.array().isFinite().all() ||
- !q.array().isFinite().all() ||
- !n.array().isFinite().all())
- {
+ if (!p.array().isFinite().all() || !q.array().isFinite().all() ||
+ !n.array().isFinite().all()) {
continue;
}
Vector6 v;
- v << (p + q).cross (n), n;
- M.rankUpdate (v);
+ v << (p + q).cross(n), n;
+ M.rankUpdate(v);
- ATb += v * (q - p).dot (n);
+ ATb += v * (q - p).dot(n);
}
// Solve A*x = b
- const Vector6 x = M.ldlt ().solve (ATb);
+ const Vector6 x = M.ldlt().solve(ATb);
// Construct the transformation matrix from x
- constructTransformationMatrix (x, transformation_matrix);
+ constructTransformationMatrix(x, transformation_matrix);
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline void
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline void
TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-setEnforceSameDirectionNormals (bool enforce_same_direction_normals)
+ setEnforceSameDirectionNormals(bool enforce_same_direction_normals)
{
- enforce_same_direction_normals_ = enforce_same_direction_normals;
+ enforce_same_direction_normals_ = enforce_same_direction_normals;
}
-
-template <typename PointSource, typename PointTarget, typename Scalar> inline bool
+template <typename PointSource, typename PointTarget, typename Scalar>
+inline bool
TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar>::
-getEnforceSameDirectionNormals ()
+ getEnforceSameDirectionNormals()
{
- return enforce_same_direction_normals_;
+ return enforce_same_direction_normals_;
}
} // namespace registration
} // namespace pcl
-
#ifndef PCL_REGISTRATION_TRANSFORMATION_VALIDATION_EUCLIDEAN_IMPL_H_
#define PCL_REGISTRATION_TRANSFORMATION_VALIDATION_EUCLIDEAN_IMPL_H_
+namespace pcl {
-namespace pcl
-{
-
-namespace registration
-{
+namespace registration {
-template <typename PointSource, typename PointTarget, typename Scalar> double
-TransformationValidationEuclidean<PointSource, PointTarget, Scalar>::validateTransformation (
- const PointCloudSourceConstPtr &cloud_src,
- const PointCloudTargetConstPtr &cloud_tgt,
- const Matrix4 &transformation_matrix) const
+template <typename PointSource, typename PointTarget, typename Scalar>
+double
+TransformationValidationEuclidean<PointSource, PointTarget, Scalar>::
+ validateTransformation(const PointCloudSourceConstPtr& cloud_src,
+ const PointCloudTargetConstPtr& cloud_tgt,
+ const Matrix4& transformation_matrix) const
{
double fitness_score = 0.0;
// Transform the input dataset using the final transformation
pcl::PointCloud<PointSource> input_transformed;
- //transformPointCloud (*cloud_src, input_transformed, transformation_matrix);
- input_transformed.resize (cloud_src->size ());
- for (std::size_t i = 0; i < cloud_src->size (); ++i)
- {
- const PointSource &src = (*cloud_src)[i];
- PointTarget &tgt = input_transformed[i];
- tgt.x = static_cast<float> (transformation_matrix (0, 0) * src.x + transformation_matrix (0, 1) * src.y + transformation_matrix (0, 2) * src.z + transformation_matrix (0, 3));
- tgt.y = static_cast<float> (transformation_matrix (1, 0) * src.x + transformation_matrix (1, 1) * src.y + transformation_matrix (1, 2) * src.z + transformation_matrix (1, 3));
- tgt.z = static_cast<float> (transformation_matrix (2, 0) * src.x + transformation_matrix (2, 1) * src.y + transformation_matrix (2, 2) * src.z + transformation_matrix (2, 3));
- }
+ // transformPointCloud (*cloud_src, input_transformed, transformation_matrix);
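+  // Apply the candidate transformation to each source point by hand (equivalent to
+  // the commented-out transformPointCloud for the XYZ fields only).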
+ input_transformed.resize(cloud_src->size());
+ for (std::size_t i = 0; i < cloud_src->size(); ++i) {
+ const PointSource& src = (*cloud_src)[i];
+ PointTarget& tgt = input_transformed[i];
+ tgt.x = static_cast<float>(
+ transformation_matrix(0, 0) * src.x + transformation_matrix(0, 1) * src.y +
+ transformation_matrix(0, 2) * src.z + transformation_matrix(0, 3));
+ tgt.y = static_cast<float>(
+ transformation_matrix(1, 0) * src.x + transformation_matrix(1, 1) * src.y +
+ transformation_matrix(1, 2) * src.z + transformation_matrix(1, 3));
+ tgt.z = static_cast<float>(
+ transformation_matrix(2, 0) * src.x + transformation_matrix(2, 1) * src.y +
+ transformation_matrix(2, 2) * src.z + transformation_matrix(2, 3));
+ }
- typename MyPointRepresentation::ConstPtr point_rep (new MyPointRepresentation);
- if (!force_no_recompute_)
- {
- tree_->setPointRepresentation (point_rep);
- tree_->setInputCloud (cloud_tgt);
+ typename MyPointRepresentation::ConstPtr point_rep(new MyPointRepresentation);
+ if (!force_no_recompute_) {
+ tree_->setPointRepresentation(point_rep);
+ tree_->setInputCloud(cloud_tgt);
}
- std::vector<int> nn_indices (1);
- std::vector<float> nn_dists (1);
+ pcl::Indices nn_indices(1);
+ std::vector<float> nn_dists(1);
// For each point in the source dataset
int nr = 0;
- for (const auto& point: input_transformed)
- {
+ for (const auto& point : input_transformed) {
// Find its nearest neighbor in the target
- tree_->nearestKSearch (point, 1, nn_indices, nn_dists);
+ tree_->nearestKSearch(point, 1, nn_indices, nn_dists);
// Deal with occlusions (incomplete targets)
    if (nn_dists[0] > max_range_)
      continue;
    // Add to the fitness score
    fitness_score += nn_dists[0];
    nr++;
  }
  if (nr > 0)
return (fitness_score / nr);
- return (std::numeric_limits<double>::max ());
+ return (std::numeric_limits<double>::max());
}
} // namespace registration
} // namespace pcl
-#endif // PCL_REGISTRATION_TRANSFORMATION_VALIDATION_EUCLIDEAN_IMPL_H_
-
+#endif // PCL_REGISTRATION_TRANSFORMATION_VALIDATION_EUCLIDEAN_IMPL_H_
#pragma once
-#include <pcl/point_cloud.h>
#include <pcl/registration/registration.h>
+#include <pcl/point_cloud.h>
namespace pcl {
- namespace registration {
-
- /** \brief Incremental @ref IterativeClosestPoint class
- *
- * This class provides a way to register a stream of clouds where each cloud will be aligned to the previous cloud.
- *
- * \code
- * IterativeClosestPoint<PointXYZ,PointXYZ>::Ptr icp (new IterativeClosestPoint<PointXYZ,PointXYZ>);
- * icp->setMaxCorrespondenceDistance (0.05);
- * icp->setMaximumIterations (50);
- *
- * IncrementalRegistration<PointXYZ> iicp;
- * iicp.setRegistration (icp);
- *
- * while (true){
- * PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- * read_cloud (*cloud);
- * iicp.registerCloud (cloud);
- *
- * PointCloud<PointXYZ>::Ptr tmp (new PointCloud<PointXYZ>);
- * transformPointCloud (*cloud, *tmp, iicp.getAbsoluteTransform ());
- * write_cloud (*tmp);
- * }
- * \endcode
- *
- * \author Michael 'v4hn' Goerner
- * \ingroup registration
- */
- template <typename PointT, typename Scalar = float>
- class IncrementalRegistration {
- public:
- using PointCloudPtr = typename pcl::PointCloud<PointT>::Ptr;
- using PointCloudConstPtr = typename pcl::PointCloud<PointT>::ConstPtr;
-
- using RegistrationPtr = typename pcl::Registration<PointT,PointT,Scalar>::Ptr;
- using Matrix4 = typename pcl::Registration<PointT,PointT,Scalar>::Matrix4;
-
- IncrementalRegistration ();
-
- /** \brief Empty destructor */
- virtual ~IncrementalRegistration () {}
-
- /** \brief Register new point cloud incrementally
- * \note You have to set a valid registration object with @ref setRegistration before using this
- * \note The class doesn't copy cloud. If you afterwards change cloud, that will affect this class.
- * \param[in] cloud point cloud to register
- * \param[in] delta_estimate estimated transform between last registered cloud and this one
- * \return true if registration converged
- */
- bool
- registerCloud (const PointCloudConstPtr& cloud, const Matrix4& delta_estimate = Matrix4::Identity ());
-
- /** \brief Get estimated transform between the last two registered clouds */
- inline Matrix4
- getDeltaTransform () const;
-
- /** \brief Get estimated overall transform */
- inline Matrix4
- getAbsoluteTransform () const;
-
- /** \brief Reset incremental Registration without resetting registration_ */
- inline void
- reset ();
-
- /** \brief Set registration instance used to align clouds */
- inline void
- setRegistration (RegistrationPtr);
- protected:
-
- /** \brief last registered point cloud */
- PointCloudConstPtr last_cloud_;
-
- /** \brief registration instance to align clouds */
- RegistrationPtr registration_;
-
- /** \brief estimated transforms */
- Matrix4 delta_transform_;
- Matrix4 abs_transform_;
- };
-
- }
-}
+namespace registration {
+
+/** \brief Incremental @ref IterativeClosestPoint class
+ *
+ * This class provides a way to register a stream of clouds where each cloud will be
+ * aligned to the previous cloud.
+ *
+ * \code
+ * IterativeClosestPoint<PointXYZ,PointXYZ>::Ptr icp
+ * (new IterativeClosestPoint<PointXYZ,PointXYZ>);
+ * icp->setMaxCorrespondenceDistance (0.05);
+ * icp->setMaximumIterations (50);
+ *
+ * IncrementalRegistration<PointXYZ> iicp;
+ * iicp.setRegistration (icp);
+ *
+ * while (true){
+ * PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
+ * read_cloud (*cloud);
+ * iicp.registerCloud (cloud);
+ *
+ * PointCloud<PointXYZ>::Ptr tmp (new PointCloud<PointXYZ>);
+ * transformPointCloud (*cloud, *tmp, iicp.getAbsoluteTransform ());
+ * write_cloud (*tmp);
+ * }
+ * \endcode
+ *
+ * \author Michael 'v4hn' Goerner
+ * \ingroup registration
+ */
+template <typename PointT, typename Scalar = float>
+class IncrementalRegistration {
+public:
+ using PointCloudPtr = typename pcl::PointCloud<PointT>::Ptr;
+ using PointCloudConstPtr = typename pcl::PointCloud<PointT>::ConstPtr;
+
+ using RegistrationPtr = typename pcl::Registration<PointT, PointT, Scalar>::Ptr;
+ using Matrix4 = typename pcl::Registration<PointT, PointT, Scalar>::Matrix4;
+
+ IncrementalRegistration();
+
+ /** \brief Empty destructor */
+ virtual ~IncrementalRegistration() {}
+
+ /** \brief Register new point cloud incrementally
+   * \note You have to set a valid registration object with @ref setRegistration
+   *       before using this.
+   * \note The class doesn't copy the cloud. If you change the cloud afterwards, that
+   *       will affect this class.
+   * \param[in] cloud point cloud to register
+   * \param[in] delta_estimate estimated transform between last registered cloud and
+   *            this one
+ * \return true if registration converged
+ */
+ bool
+ registerCloud(const PointCloudConstPtr& cloud,
+ const Matrix4& delta_estimate = Matrix4::Identity());
+
+ /** \brief Get estimated transform between the last two registered clouds */
+ inline Matrix4
+ getDeltaTransform() const;
+
+ /** \brief Get estimated overall transform */
+ inline Matrix4
+ getAbsoluteTransform() const;
+
+ /** \brief Reset incremental Registration without resetting registration_ */
+ inline void
+ reset();
+
+ /** \brief Set registration instance used to align clouds */
+ inline void setRegistration(RegistrationPtr);
+
+protected:
+ /** \brief last registered point cloud */
+ PointCloudConstPtr last_cloud_;
+
+ /** \brief registration instance to align clouds */
+ RegistrationPtr registration_;
+
+ /** \brief estimated transforms */
+ Matrix4 delta_transform_;
+ Matrix4 abs_transform_;
+};
+
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/incremental_registration.hpp>
/*
* Software License Agreement (BSD License)
- *
+ *
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2009-2012, Willow Garage, Inc.
* Copyright (c) 2012-, Open Perception, Inc.
- *
+ *
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* * Neither the name of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// PCL includes
#include <pcl/registration/icp.h>
-namespace pcl
-{
- /** \brief @b JointIterativeClosestPoint extends ICP to multiple frames which
- * share the same transform. This is particularly useful when solving for
- * camera extrinsics using multiple observations. When given a single pair of
- * clouds, this reduces to vanilla ICP.
- *
- * \author Stephen Miller
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class JointIterativeClosestPoint : public IterativeClosestPoint<PointSource, PointTarget, Scalar>
+namespace pcl {
+/** \brief @b JointIterativeClosestPoint extends ICP to multiple frames which
+ * share the same transform. This is particularly useful when solving for
+ * camera extrinsics using multiple observations. When given a single pair of
+ * clouds, this reduces to vanilla ICP.
+ *
+ * \author Stephen Miller
+ * \ingroup registration
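+ *
+ * A minimal usage sketch (illustrative only; src_a/tgt_a and src_b/tgt_b are assumed
+ * PointCloud<PointXYZ>::ConstPtr observation pairs sharing one unknown transform):
+ * \code
+ * JointIterativeClosestPoint<PointXYZ, PointXYZ> jicp;
+ * jicp.addInputSource (src_a); jicp.addInputTarget (tgt_a);
+ * jicp.addInputSource (src_b); jicp.addInputTarget (tgt_b);
+ * PointCloud<PointXYZ> aligned;
+ * jicp.align (aligned);
+ * Eigen::Matrix4f shared_transform = jicp.getFinalTransformation ();
+ * \endcode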
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class JointIterativeClosestPoint
+: public IterativeClosestPoint<PointSource, PointTarget, Scalar> {
+public:
+ using PointCloudSource = typename IterativeClosestPoint<PointSource,
+ PointTarget,
+ Scalar>::PointCloudSource;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = typename IterativeClosestPoint<PointSource,
+ PointTarget,
+ Scalar>::PointCloudTarget;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using KdTree = pcl::search::KdTree<PointTarget>;
+ using KdTreePtr = typename KdTree::Ptr;
+
+ using KdTreeReciprocal = pcl::search::KdTree<PointSource>;
+ using KdTreeReciprocalPtr = typename KdTree::Ptr;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+ using Ptr = shared_ptr<JointIterativeClosestPoint<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const JointIterativeClosestPoint<PointSource, PointTarget, Scalar>>;
+
+ using CorrespondenceEstimation =
+ pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>;
+ using CorrespondenceEstimationPtr = typename CorrespondenceEstimation::Ptr;
+ using CorrespondenceEstimationConstPtr = typename CorrespondenceEstimation::ConstPtr;
+
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::reg_name_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::getClassName;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputSource;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::input_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::indices_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::target_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::nr_iterations_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::max_iterations_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ previous_transformation_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::final_transformation_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformation_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ transformation_epsilon_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::converged_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::corr_dist_threshold_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::inlier_threshold_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ min_number_correspondences_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::update_visualizer_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ euclidean_fitness_epsilon_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::correspondences_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ transformation_estimation_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ correspondence_estimation_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ correspondence_rejectors_;
+
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::
+ use_reciprocal_correspondence_;
+
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::convergence_criteria_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::source_has_normals_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::target_has_normals_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::need_source_blob_;
+ using IterativeClosestPoint<PointSource, PointTarget, Scalar>::need_target_blob_;
+
+ using Matrix4 =
+ typename IterativeClosestPoint<PointSource, PointTarget, Scalar>::Matrix4;
+
+ /** \brief Empty constructor. */
+ JointIterativeClosestPoint()
{
- public:
- using PointCloudSource = typename IterativeClosestPoint<PointSource, PointTarget, Scalar>::PointCloudSource;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = typename IterativeClosestPoint<PointSource, PointTarget, Scalar>::PointCloudTarget;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using KdTree = pcl::search::KdTree<PointTarget>;
- using KdTreePtr = typename KdTree::Ptr;
-
- using KdTreeReciprocal = pcl::search::KdTree<PointSource>;
- using KdTreeReciprocalPtr = typename KdTree::Ptr;
-
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- using Ptr = shared_ptr<JointIterativeClosestPoint<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const JointIterativeClosestPoint<PointSource, PointTarget, Scalar> >;
-
- using CorrespondenceEstimation = pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>;
- using CorrespondenceEstimationPtr = typename CorrespondenceEstimation::Ptr;
- using CorrespondenceEstimationConstPtr = typename CorrespondenceEstimation::ConstPtr;
-
-
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::reg_name_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::getClassName;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputSource;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::input_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::indices_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::target_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::nr_iterations_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::max_iterations_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::previous_transformation_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::final_transformation_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformation_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformation_epsilon_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::converged_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::corr_dist_threshold_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::inlier_threshold_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::min_number_correspondences_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::update_visualizer_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::euclidean_fitness_epsilon_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::correspondences_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformation_estimation_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::correspondence_estimation_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::correspondence_rejectors_;
-
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::use_reciprocal_correspondence_;
-
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::convergence_criteria_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::source_has_normals_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::target_has_normals_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::need_source_blob_;
- using IterativeClosestPoint<PointSource, PointTarget, Scalar>::need_target_blob_;
-
-
- using Matrix4 = typename IterativeClosestPoint<PointSource, PointTarget, Scalar>::Matrix4;
-
- /** \brief Empty constructor. */
- JointIterativeClosestPoint ()
- {
- IterativeClosestPoint<PointSource, PointTarget, Scalar> ();
- reg_name_ = "JointIterativeClosestPoint";
- };
-
- /** \brief Empty destructor */
- ~JointIterativeClosestPoint () {}
-
-
- /** \brief Provide a pointer to the input source
- * (e.g., the point cloud that we want to align to the target)
- */
- void
- setInputSource (const PointCloudSourceConstPtr& /*cloud*/) override
- {
- PCL_WARN ("[pcl::%s::setInputSource] Warning; JointIterativeClosestPoint expects multiple clouds. Please use addInputSource.",
- getClassName ().c_str ());
- return;
- }
-
- /** \brief Add a source cloud to the joint solver
- *
- * \param[in] cloud source cloud
- */
- inline void
- addInputSource (const PointCloudSourceConstPtr &cloud)
- {
- // Set the parent InputSource, just to get all cached values (e.g. the existence of normals).
- if (sources_.empty ())
- IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputSource (cloud);
- sources_.push_back (cloud);
- }
-
- /** \brief Provide a pointer to the input target
- * (e.g., the point cloud that we want to align to the target)
- */
- void
- setInputTarget (const PointCloudTargetConstPtr& /*cloud*/) override
- {
- PCL_WARN ("[pcl::%s::setInputTarget] Warning; JointIterativeClosestPoint expects multiple clouds. Please use addInputTarget.",
- getClassName ().c_str ());
- return;
- }
-
- /** \brief Add a target cloud to the joint solver
- *
- * \param[in] cloud target cloud
- */
- inline void
- addInputTarget (const PointCloudTargetConstPtr &cloud)
- {
- // Set the parent InputTarget, just to get all cached values (e.g. the existence of normals).
- if (targets_.empty ())
- IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputTarget (cloud);
- targets_.push_back (cloud);
- }
-
- /** \brief Add a manual correspondence estimator
- * If you choose to do this, you must add one for each
- * input source / target pair. They do not need to have trees
- * or input clouds set ahead of time.
- *
- * \param[in] ce Correspondence estimation
- */
- inline void
- addCorrespondenceEstimation (CorrespondenceEstimationPtr ce)
- {
- correspondence_estimations_.push_back (ce);
- }
-
- /** \brief Reset my list of input sources
- */
- inline void
- clearInputSources ()
- { sources_.clear (); }
-
- /** \brief Reset my list of input targets
- */
- inline void
- clearInputTargets ()
- { targets_.clear (); }
-
- /** \brief Reset my list of correspondence estimation methods.
- */
- inline void
- clearCorrespondenceEstimations ()
- { correspondence_estimations_.clear (); }
-
-
- protected:
-
- /** \brief Rigid transformation computation method with initial guess.
- * \param output the transformed input point cloud dataset using the rigid transformation found
- * \param guess the initial guess of the transformation to compute
- */
- void
- computeTransformation (PointCloudSource &output, const Matrix4 &guess) override;
-
- /** \brief Looks at the Estimators and Rejectors and determines whether their blob-setter methods need to be called */
- void
- determineRequiredBlobData () override;
-
- std::vector<PointCloudSourceConstPtr> sources_;
- std::vector<PointCloudTargetConstPtr> targets_;
- std::vector<CorrespondenceEstimationPtr> correspondence_estimations_;
+ IterativeClosestPoint<PointSource, PointTarget, Scalar>();
+ reg_name_ = "JointIterativeClosestPoint";
};
-}
+ /** \brief Empty destructor */
+ ~JointIterativeClosestPoint() {}
+
+ /** \brief Provide a pointer to the input source
+ * (e.g., the point cloud that we want to align to the target)
+ */
+ void
+ setInputSource(const PointCloudSourceConstPtr& /*cloud*/) override
+ {
+    PCL_WARN("[pcl::%s::setInputSource] Warning: JointIterativeClosestPoint expects "
+ "multiple clouds. Please use addInputSource.\n",
+ getClassName().c_str());
+ return;
+ }
+
+ /** \brief Add a source cloud to the joint solver
+ *
+ * \param[in] cloud source cloud
+ */
+ inline void
+ addInputSource(const PointCloudSourceConstPtr& cloud)
+ {
+ // Set the parent InputSource, just to get all cached values (e.g. the existence of
+ // normals).
+ if (sources_.empty())
+ IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputSource(cloud);
+ sources_.push_back(cloud);
+ }
+
+ /** \brief Provide a pointer to the input target
+ * (e.g., the point cloud that we want to align to the target)
+ */
+ void
+ setInputTarget(const PointCloudTargetConstPtr& /*cloud*/) override
+ {
+ PCL_WARN("[pcl::%s::setInputTarget] Warning; JointIterativeClosestPoint expects "
+ "multiple clouds. Please use addInputTarget.\n",
+ getClassName().c_str());
+ return;
+ }
+
+ /** \brief Add a target cloud to the joint solver
+ *
+ * \param[in] cloud target cloud
+ */
+ inline void
+ addInputTarget(const PointCloudTargetConstPtr& cloud)
+ {
+ // Set the parent InputTarget, just to get all cached values (e.g. the existence of
+ // normals).
+ if (targets_.empty())
+ IterativeClosestPoint<PointSource, PointTarget, Scalar>::setInputTarget(cloud);
+ targets_.push_back(cloud);
+ }
+
+ /** \brief Add a manual correspondence estimator
+ * If you choose to do this, you must add one for each
+ * input source / target pair. They do not need to have trees
+ * or input clouds set ahead of time.
+ *
+ * \param[in] ce Correspondence estimation
+ */
+ inline void
+ addCorrespondenceEstimation(CorrespondenceEstimationPtr ce)
+ {
+ correspondence_estimations_.push_back(ce);
+ }
+
+ /** \brief Reset my list of input sources
+ */
+ inline void
+ clearInputSources()
+ {
+ sources_.clear();
+ }
+
+ /** \brief Reset my list of input targets
+ */
+ inline void
+ clearInputTargets()
+ {
+ targets_.clear();
+ }
+
+ /** \brief Reset my list of correspondence estimation methods.
+ */
+ inline void
+ clearCorrespondenceEstimations()
+ {
+ correspondence_estimations_.clear();
+ }
+
+protected:
+ /** \brief Rigid transformation computation method with initial guess.
+ * \param output the transformed input point cloud dataset using the rigid
+ * transformation found \param guess the initial guess of the transformation to
+ * compute
+ */
+ void
+ computeTransformation(PointCloudSource& output, const Matrix4& guess) override;
+
+ /** \brief Looks at the Estimators and Rejectors and determines whether their
+ * blob-setter methods need to be called */
+ void
+ determineRequiredBlobData() override;
+
+ std::vector<PointCloudSourceConstPtr> sources_;
+ std::vector<PointCloudTargetConstPtr> targets_;
+ std::vector<CorrespondenceEstimationPtr> correspondence_estimations_;
+};
+
+} // namespace pcl
#include <pcl/registration/impl/joint_icp.hpp>
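To orient readers on the add/align flow above, here is a minimal usage sketch. The cloud pointers (src_a, src_b, tgt_a, tgt_b) are hypothetical, already-filled PointCloud<pcl::PointXYZ>::ConstPtr objects, and align()/getFinalTransformation() come from the Registration base class; pairing of sources and targets by insertion order is implied by the addCorrespondenceEstimation() note above.

#include <pcl/point_types.h>
#include <pcl/registration/joint_icp.h>

pcl::JointIterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> jicp;
// Source and target clouds added at the same position form a pair
// (cf. addCorrespondenceEstimation above).
jicp.addInputSource (src_a);
jicp.addInputTarget (tgt_a);
jicp.addInputSource (src_b);
jicp.addInputTarget (tgt_b);

pcl::PointCloud<pcl::PointXYZ> aligned;
jicp.align (aligned);  // solves for one rigid transform over all pairs
Eigen::Matrix4f transform = jicp.getFinalTransformation ();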
#pragma once
-#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
-#include <pcl/pcl_base.h>
-#include <pcl/registration/eigen.h>
-#include <pcl/registration/boost.h>
#include <pcl/common/transforms.h>
-#include <pcl/correspondence.h>
#include <pcl/registration/boost_graph.h>
+#include <pcl/correspondence.h>
+#include <pcl/memory.h>
+#include <pcl/pcl_base.h>
+#include <pcl/pcl_macros.h>
-namespace Eigen
-{
- using Vector6f = Eigen::Matrix<float, 6, 1>;
- using Matrix6f = Eigen::Matrix<float, 6, 6>;
-}
-
-namespace pcl
-{
- namespace registration
- {
- /** \brief Globally Consistent Scan Matching based on an algorithm by Lu and Milios.
- * \details A GraphSLAM algorithm where registration data is managed in a graph:
- * <ul>
- * <li>Vertices represent poses and hold the point cloud data and relative transformations.</li>
- * <li>Edges represent pose constraints and hold the correspondence data between two point clouds.</li>
- * </ul>
- * Computation uses the first point cloud in the SLAM graph as a reference pose and attempts to align all other point clouds to it simultaneously.
- * For more information:
- * <ul><li>
- * F. Lu, E. Milios,
- * Globally Consistent Range Scan Alignment for Environment Mapping,
- * Autonomous Robots 4, April 1997
- * </li><li>
- * Dorit Borrmann, Jan Elseberg, Kai Lingemann, Andreas Nüchter, and Joachim Hertzberg,
- * The Efficient Extension of Globally Consistent Scan Matching to 6 DoF,
- * In Proceedings of the 4th International Symposium on 3D Data Processing, Visualization and Transmission (3DPVT '08), June 2008
- * </li></ul>
- * Usage example:
- * \code
- * pcl::registration::LUM<pcl::PointXYZ> lum;
- * // Add point clouds as vertices to the SLAM graph
- * lum.addPointCloud (cloud_0);
- * lum.addPointCloud (cloud_1);
- * lum.addPointCloud (cloud_2);
- * // Use your favorite pairwise correspondence estimation algorithm(s)
- * corrs_0_to_1 = someAlgo (cloud_0, cloud_1);
- * corrs_1_to_2 = someAlgo (cloud_1, cloud_2);
- * corrs_2_to_0 = someAlgo (lum.getPointCloud (2), lum.getPointCloud (0));
- * // Add the correspondence results as edges to the SLAM graph
- * lum.setCorrespondences (0, 1, corrs_0_to_1);
- * lum.setCorrespondences (1, 2, corrs_1_to_2);
- * lum.setCorrespondences (2, 0, corrs_2_to_0);
- * // Change the computation parameters
- * lum.setMaxIterations (5);
- * lum.setConvergenceThreshold (0.0);
- * // Perform the actual LUM computation
- * lum.compute ();
- * // Return the concatenated point cloud result
- * cloud_out = lum.getConcatenatedCloud ();
- * // Return the separate point cloud transformations
- * for(int i = 0; i < lum.getNumVertices (); i++)
- * {
- * transforms_out[i] = lum.getTransformation (i);
- * }
- * \endcode
- * \author Frits Florentinus, Jochen Sprickerhof
- * \ingroup registration
- */
- template<typename PointT>
- class LUM
- {
- public:
- using Ptr = shared_ptr<LUM<PointT> >;
- using ConstPtr = shared_ptr<const LUM<PointT> >;
-
- using PointCloud = pcl::PointCloud<PointT>;
- using PointCloudPtr = typename PointCloud::Ptr;
- using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
- struct VertexProperties
- {
- PointCloudPtr cloud_;
- Eigen::Vector6f pose_;
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
- struct EdgeProperties
- {
- pcl::CorrespondencesPtr corrs_;
- Eigen::Matrix6f cinv_;
- Eigen::Vector6f cinvd_;
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
-
- using SLAMGraph = boost::adjacency_list<boost::eigen_vecS, boost::eigen_vecS, boost::bidirectionalS, VertexProperties, EdgeProperties, boost::no_property, boost::eigen_listS>;
- using SLAMGraphPtr = shared_ptr<SLAMGraph>;
- using Vertex = typename SLAMGraph::vertex_descriptor;
- using Edge = typename SLAMGraph::edge_descriptor;
-
- /** \brief Empty constructor.
- */
- LUM ()
- : slam_graph_ (new SLAMGraph)
- , max_iterations_ (5)
- , convergence_threshold_ (0.0)
- {
- }
-
- /** \brief Set the internal SLAM graph structure.
- * \details All data used and produced by LUM is stored in this boost::adjacency_list.
- * It is recommended to use the LUM class itself to build the graph.
- * This method could otherwise be useful for managing several SLAM graphs in one instance of LUM.
- * \param[in] slam_graph The new SLAM graph.
- */
- inline void
- setLoopGraph (const SLAMGraphPtr &slam_graph);
-
- /** \brief Get the internal SLAM graph structure.
- * \details All data used and produced by LUM is stored in this boost::adjacency_list.
- * It is recommended to use the LUM class itself to build the graph.
- * This method could otherwise be useful for managing several SLAM graphs in one instance of LUM.
- * \return The current SLAM graph.
- */
- inline SLAMGraphPtr
- getLoopGraph () const;
-
- /** \brief Get the number of vertices in the SLAM graph.
- * \return The current number of vertices in the SLAM graph.
- */
- typename SLAMGraph::vertices_size_type
- getNumVertices () const;
-
- /** \brief Set the maximum number of iterations for the compute() method.
- * \details The compute() method finishes when max_iterations are met or when the convergence criteria is met.
- * \param[in] max_iterations The new maximum number of iterations (default = 5).
- */
- void
- setMaxIterations (int max_iterations);
-
- /** \brief Get the maximum number of iterations for the compute() method.
- * \details The compute() method finishes when max_iterations are met or when the convergence criteria is met.
- * \return The current maximum number of iterations (default = 5).
- */
- inline int
- getMaxIterations () const;
-
- /** \brief Set the convergence threshold for the compute() method.
- * \details When the compute() method computes the new poses relative to the old poses, it will determine the length of the difference vector.
- * When the average length of all difference vectors becomes less than the convergence_threshold the convergence is assumed to be met.
- * \param[in] convergence_threshold The new convergence threshold (default = 0.0).
- */
- void
- setConvergenceThreshold (float convergence_threshold);
-
- /** \brief Get the convergence threshold for the compute() method.
- * \details When the compute() method computes the new poses relative to the old poses, it will determine the length of the difference vector.
- * When the average length of all difference vectors becomes less than the convergence_threshold the convergence is assumed to be met.
- * \return The current convergence threshold (default = 0.0).
- */
- inline float
- getConvergenceThreshold () const;
-
- /** \brief Add a new point cloud to the SLAM graph.
- * \details This method will add a new vertex to the SLAM graph and attach a point cloud to that vertex.
- * Optionally you can specify a pose estimate for this point cloud.
- * A vertex' pose is always relative to the first vertex in the SLAM graph, i.e. the first point cloud that was added.
- * Because this first vertex is the reference, you can not set a pose estimate for this vertex.
- * Providing pose estimates to the vertices in the SLAM graph will reduce overall computation time of LUM.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] cloud The new point cloud.
- * \param[in] pose (optional) The pose estimate relative to the reference pose (first point cloud that was added).
- * \return The vertex descriptor of the newly created vertex.
- */
- Vertex
- addPointCloud (const PointCloudPtr &cloud, const Eigen::Vector6f &pose = Eigen::Vector6f::Zero ());
-
- /** \brief Change a point cloud on one of the SLAM graph's vertices.
- * \details This method will change the point cloud attached to an existing vertex and will not alter the SLAM graph structure.
- * Note that the correspondences attached to this vertex will not change and may need to be updated manually.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] vertex The vertex descriptor of which to change the point cloud.
- * \param[in] cloud The new point cloud for that vertex.
- */
- inline void
- setPointCloud (const Vertex &vertex, const PointCloudPtr &cloud);
-
- /** \brief Return a point cloud from one of the SLAM graph's vertices.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] vertex The vertex descriptor of which to return the point cloud.
- * \return The current point cloud for that vertex.
- */
- inline PointCloudPtr
- getPointCloud (const Vertex &vertex) const;
-
- /** \brief Change a pose estimate on one of the SLAM graph's vertices.
- * \details A vertex' pose is always relative to the first vertex in the SLAM graph, i.e. the first point cloud that was added.
- * Because this first vertex is the reference, you can not set a pose estimate for this vertex.
- * Providing pose estimates to the vertices in the SLAM graph will reduce overall computation time of LUM.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] vertex The vertex descriptor of which to set the pose estimate.
- * \param[in] pose The new pose estimate for that vertex.
- */
- inline void
- setPose (const Vertex &vertex, const Eigen::Vector6f &pose);
-
- /** \brief Return a pose estimate from one of the SLAM graph's vertices.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] vertex The vertex descriptor of which to return the pose estimate.
- * \return The current pose estimate of that vertex.
- */
- inline Eigen::Vector6f
- getPose (const Vertex &vertex) const;
-
- /** \brief Return a pose estimate from one of the SLAM graph's vertices as an affine transformation matrix.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] vertex The vertex descriptor of which to return the transformation matrix.
- * \return The current transformation matrix of that vertex.
- */
- inline Eigen::Affine3f
- getTransformation (const Vertex &vertex) const;
-
- /** \brief Add/change a set of correspondences for one of the SLAM graph's edges.
- * \details The edges in the SLAM graph are directional and point from source vertex to target vertex.
- * The query indices of the correspondences, index the points at the source vertex' point cloud.
- * The matching indices of the correspondences, index the points at the target vertex' point cloud.
- * If no edge was present at the specified location, this method will add a new edge to the SLAM graph and attach the correspondences to that edge.
- * If the edge was already present, this method will overwrite the correspondence information of that edge and will not alter the SLAM graph structure.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] source_vertex The vertex descriptor of the correspondences' source point cloud.
- * \param[in] target_vertex The vertex descriptor of the correspondences' target point cloud.
- * \param[in] corrs The new set of correspondences for that edge.
- */
- void
- setCorrespondences (const Vertex &source_vertex,
- const Vertex &target_vertex,
- const pcl::CorrespondencesPtr &corrs);
-
- /** \brief Return a set of correspondences from one of the SLAM graph's edges.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] source_vertex The vertex descriptor of the correspondences' source point cloud.
- * \param[in] target_vertex The vertex descriptor of the correspondences' target point cloud.
- * \return The current set of correspondences of that edge.
- */
- inline pcl::CorrespondencesPtr
- getCorrespondences (const Vertex &source_vertex, const Vertex &target_vertex) const;
-
- /** \brief Perform LUM's globally consistent scan matching.
- * \details Computation uses the first point cloud in the SLAM graph as a reference pose and attempts to align all other point clouds to it simultaneously.
- * <br>
- * Things to keep in mind:
- * <ul>
- * <li>Only those parts of the graph connected to the reference pose will properly align to it.</li>
- * <li>All sets of correspondences should span the same space and need to be sufficient to determine a rigid transformation.</li>
- * <li>The algorithm draws it strength from loops in the graph because it will distribute errors evenly amongst those loops.</li>
- * </ul>
- * Computation ends when either of the following conditions hold:
- * <ul>
- * <li>The number of iterations reaches max_iterations. Use setMaxIterations() to change.</li>
- * <li>The convergence criteria is met. Use setConvergenceThreshold() to change.</li>
- * </ul>
- * Computation will change the pose estimates for the vertices of the SLAM graph, not the point clouds attached to them.
- * The results can be retrieved with getPose(), getTransformation(), getTransformedCloud() or getConcatenatedCloud().
- */
- void
- compute ();
-
- /** \brief Return a point cloud from one of the SLAM graph's vertices compounded onto its current pose estimate.
- * \note Vertex descriptors are typecastable to int.
- * \param[in] vertex The vertex descriptor of which to return the transformed point cloud.
- * \return The transformed point cloud of that vertex.
- */
- PointCloudPtr
- getTransformedCloud (const Vertex &vertex) const;
-
- /** \brief Return a concatenated point cloud of all the SLAM graph's point clouds compounded onto their current pose estimates.
- * \return The concatenated transformed point clouds of the entire SLAM graph.
- */
- PointCloudPtr
- getConcatenatedCloud () const;
-
- protected:
- /** \brief Linearized computation of C^-1 and C^-1*D (results stored in slam_graph_). */
- void
- computeEdge (const Edge &e);
-
- /** \brief Returns a pose corrected 6DoF incidence matrix. */
- inline Eigen::Matrix6f
- incidenceCorrection (const Eigen::Vector6f &pose);
-
- private:
- /** \brief The internal SLAM graph structure. */
- SLAMGraphPtr slam_graph_;
-
- /** \brief The maximum number of iterations for the compute() method. */
- int max_iterations_;
-
- /** \brief The convergence threshold for the summed vector lengths of all poses. */
- float convergence_threshold_;
- };
- }
-}
+namespace Eigen {
+using Vector6f = Eigen::Matrix<float, 6, 1>;
+using Matrix6f = Eigen::Matrix<float, 6, 6>;
+} // namespace Eigen
+
+namespace pcl {
+namespace registration {
+/** \brief Globally Consistent Scan Matching based on an algorithm by Lu and Milios.
+ * \details A GraphSLAM algorithm where registration data is managed in a graph:
+ * <ul>
+ * <li>Vertices represent poses and hold the point cloud data and relative
+ * transformations.</li> <li>Edges represent pose constraints and hold the
+ * correspondence data between two point clouds.</li>
+ * </ul>
+ * Computation uses the first point cloud in the SLAM graph as a reference pose and
+ * attempts to align all other point clouds to it simultaneously. For more information:
+ * <ul><li>
+ * F. Lu, E. Milios,
+ * Globally Consistent Range Scan Alignment for Environment Mapping,
+ * Autonomous Robots 4, April 1997
+ * </li><li>
+ * Dorit Borrmann, Jan Elseberg, Kai Lingemann, Andreas Nüchter, and Joachim Hertzberg,
+ * The Efficient Extension of Globally Consistent Scan Matching to 6 DoF,
+ * In Proceedings of the 4th International Symposium on 3D Data Processing,
+ * Visualization and Transmission (3DPVT '08), June 2008
+ * </li></ul>
+ * Usage example:
+ * \code
+ * pcl::registration::LUM<pcl::PointXYZ> lum;
+ * // Add point clouds as vertices to the SLAM graph
+ * lum.addPointCloud (cloud_0);
+ * lum.addPointCloud (cloud_1);
+ * lum.addPointCloud (cloud_2);
+ * // Use your favorite pairwise correspondence estimation algorithm(s)
+ * corrs_0_to_1 = someAlgo (cloud_0, cloud_1);
+ * corrs_1_to_2 = someAlgo (cloud_1, cloud_2);
+ * corrs_2_to_0 = someAlgo (lum.getPointCloud (2), lum.getPointCloud (0));
+ * // Add the correspondence results as edges to the SLAM graph
+ * lum.setCorrespondences (0, 1, corrs_0_to_1);
+ * lum.setCorrespondences (1, 2, corrs_1_to_2);
+ * lum.setCorrespondences (2, 0, corrs_2_to_0);
+ * // Change the computation parameters
+ * lum.setMaxIterations (5);
+ * lum.setConvergenceThreshold (0.0);
+ * // Perform the actual LUM computation
+ * lum.compute ();
+ * // Return the concatenated point cloud result
+ * cloud_out = lum.getConcatenatedCloud ();
+ * // Return the separate point cloud transformations
+ * for(int i = 0; i < lum.getNumVertices (); i++)
+ * {
+ * transforms_out[i] = lum.getTransformation (i);
+ * }
+ * \endcode
+ * \author Frits Florentinus, Jochen Sprickerhof
+ * \ingroup registration
+ */
+template <typename PointT>
+class LUM {
+public:
+ using Ptr = shared_ptr<LUM<PointT>>;
+ using ConstPtr = shared_ptr<const LUM<PointT>>;
+
+ using PointCloud = pcl::PointCloud<PointT>;
+ using PointCloudPtr = typename PointCloud::Ptr;
+ using PointCloudConstPtr = typename PointCloud::ConstPtr;
+
+ struct VertexProperties {
+ PointCloudPtr cloud_;
+ Eigen::Vector6f pose_;
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+ };
+ struct EdgeProperties {
+ pcl::CorrespondencesPtr corrs_;
+ Eigen::Matrix6f cinv_;
+ Eigen::Vector6f cinvd_;
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+ };
+
+ using SLAMGraph = boost::adjacency_list<boost::eigen_vecS,
+ boost::eigen_vecS,
+ boost::bidirectionalS,
+ VertexProperties,
+ EdgeProperties,
+ boost::no_property,
+ boost::eigen_listS>;
+ using SLAMGraphPtr = shared_ptr<SLAMGraph>;
+ using Vertex = typename SLAMGraph::vertex_descriptor;
+ using Edge = typename SLAMGraph::edge_descriptor;
+
+ /** \brief Empty constructor.
+ */
+ LUM() : slam_graph_(new SLAMGraph), max_iterations_(5), convergence_threshold_(0.0) {}
+
+ /** \brief Set the internal SLAM graph structure.
+ * \details All data used and produced by LUM is stored in this boost::adjacency_list.
+ * It is recommended to use the LUM class itself to build the graph.
+ * This method could otherwise be useful for managing several SLAM graphs in one
+ * instance of LUM. \param[in] slam_graph The new SLAM graph.
+ */
+ inline void
+ setLoopGraph(const SLAMGraphPtr& slam_graph);
+
+ /** \brief Get the internal SLAM graph structure.
+ * \details All data used and produced by LUM is stored in this boost::adjacency_list.
+ * It is recommended to use the LUM class itself to build the graph.
+ * This method could otherwise be useful for managing several SLAM graphs in one
+ * instance of LUM. \return The current SLAM graph.
+ */
+ inline SLAMGraphPtr
+ getLoopGraph() const;
+
+ /** \brief Get the number of vertices in the SLAM graph.
+ * \return The current number of vertices in the SLAM graph.
+ */
+ typename SLAMGraph::vertices_size_type
+ getNumVertices() const;
+
+ /** \brief Set the maximum number of iterations for the compute() method.
+   * \details The compute() method finishes when max_iterations is reached or when the
+   * convergence criterion is met.
+   * \param[in] max_iterations The new maximum number of iterations (default = 5).
+ */
+ void
+ setMaxIterations(int max_iterations);
+
+ /** \brief Get the maximum number of iterations for the compute() method.
+   * \details The compute() method finishes when max_iterations is reached or when the
+   * convergence criterion is met.
+   * \return The current maximum number of iterations (default = 5).
+ */
+ inline int
+ getMaxIterations() const;
+
+ /** \brief Set the convergence threshold for the compute() method.
+ * \details When the compute() method computes the new poses relative to the old
+ * poses, it will determine the length of the difference vector. When the average
+ * length of all difference vectors becomes less than the convergence_threshold the
+ * convergence is assumed to be met. \param[in] convergence_threshold The new
+ * convergence threshold (default = 0.0).
+ */
+ void
+ setConvergenceThreshold(float convergence_threshold);
+
+ /** \brief Get the convergence threshold for the compute() method.
+ * \details When the compute() method computes the new poses relative to the old
+ * poses, it will determine the length of the difference vector. When the average
+   * length of all difference vectors becomes less than the convergence_threshold,
+   * convergence is assumed to be met.
+   * \return The current convergence threshold (default = 0.0).
+ */
+ inline float
+ getConvergenceThreshold() const;
+
+ /** \brief Add a new point cloud to the SLAM graph.
+ * \details This method will add a new vertex to the SLAM graph and attach a point
+ * cloud to that vertex. Optionally you can specify a pose estimate for this point
+ * cloud. A vertex' pose is always relative to the first vertex in the SLAM graph,
+ * i.e. the first point cloud that was added. Because this first vertex is the
+ * reference, you can not set a pose estimate for this vertex. Providing pose
+ * estimates to the vertices in the SLAM graph will reduce overall computation time of
+ * LUM. \note Vertex descriptors are typecastable to int. \param[in] cloud The new
+ * point cloud. \param[in] pose (optional) The pose estimate relative to the reference
+ * pose (first point cloud that was added). \return The vertex descriptor of the newly
+ * created vertex.
+ */
+ Vertex
+ addPointCloud(const PointCloudPtr& cloud,
+ const Eigen::Vector6f& pose = Eigen::Vector6f::Zero());
+
+ /** \brief Change a point cloud on one of the SLAM graph's vertices.
+ * \details This method will change the point cloud attached to an existing vertex and
+ * will not alter the SLAM graph structure. Note that the correspondences attached to
+ * this vertex will not change and may need to be updated manually. \note Vertex
+ * descriptors are typecastable to int. \param[in] vertex The vertex descriptor of
+ * which to change the point cloud. \param[in] cloud The new point cloud for that
+ * vertex.
+ */
+ inline void
+ setPointCloud(const Vertex& vertex, const PointCloudPtr& cloud);
+
+ /** \brief Return a point cloud from one of the SLAM graph's vertices.
+ * \note Vertex descriptors are typecastable to int.
+ * \param[in] vertex The vertex descriptor of which to return the point cloud.
+ * \return The current point cloud for that vertex.
+ */
+ inline PointCloudPtr
+ getPointCloud(const Vertex& vertex) const;
+
+ /** \brief Change a pose estimate on one of the SLAM graph's vertices.
+ * \details A vertex' pose is always relative to the first vertex in the SLAM graph,
+ * i.e. the first point cloud that was added. Because this first vertex is the
+ * reference, you can not set a pose estimate for this vertex. Providing pose
+ * estimates to the vertices in the SLAM graph will reduce overall computation time of
+ * LUM. \note Vertex descriptors are typecastable to int. \param[in] vertex The vertex
+ * descriptor of which to set the pose estimate. \param[in] pose The new pose estimate
+ * for that vertex.
+ */
+ inline void
+ setPose(const Vertex& vertex, const Eigen::Vector6f& pose);
+
+ /** \brief Return a pose estimate from one of the SLAM graph's vertices.
+ * \note Vertex descriptors are typecastable to int.
+ * \param[in] vertex The vertex descriptor of which to return the pose estimate.
+ * \return The current pose estimate of that vertex.
+ */
+ inline Eigen::Vector6f
+ getPose(const Vertex& vertex) const;
+
+ /** \brief Return a pose estimate from one of the SLAM graph's vertices as an affine
+ * transformation matrix. \note Vertex descriptors are typecastable to int. \param[in]
+ * vertex The vertex descriptor of which to return the transformation matrix. \return
+ * The current transformation matrix of that vertex.
+ */
+ inline Eigen::Affine3f
+ getTransformation(const Vertex& vertex) const;
+
+ /** \brief Add/change a set of correspondences for one of the SLAM graph's edges.
+ * \details The edges in the SLAM graph are directional and point from source vertex
+ * to target vertex. The query indices of the correspondences, index the points at the
+ * source vertex' point cloud. The matching indices of the correspondences, index the
+ * points at the target vertex' point cloud. If no edge was present at the specified
+ * location, this method will add a new edge to the SLAM graph and attach the
+ * correspondences to that edge. If the edge was already present, this method will
+ * overwrite the correspondence information of that edge and will not alter the SLAM
+ * graph structure. \note Vertex descriptors are typecastable to int. \param[in]
+ * source_vertex The vertex descriptor of the correspondences' source point cloud.
+ * \param[in] target_vertex The vertex descriptor of the correspondences' target point
+ * cloud. \param[in] corrs The new set of correspondences for that edge.
+ */
+ void
+ setCorrespondences(const Vertex& source_vertex,
+ const Vertex& target_vertex,
+ const pcl::CorrespondencesPtr& corrs);
+
+ /** \brief Return a set of correspondences from one of the SLAM graph's edges.
+ * \note Vertex descriptors are typecastable to int.
+ * \param[in] source_vertex The vertex descriptor of the correspondences' source point
+ * cloud. \param[in] target_vertex The vertex descriptor of the correspondences'
+ * target point cloud. \return The current set of correspondences of that edge.
+ */
+ inline pcl::CorrespondencesPtr
+ getCorrespondences(const Vertex& source_vertex, const Vertex& target_vertex) const;
+
+ /** \brief Perform LUM's globally consistent scan matching.
+ * \details Computation uses the first point cloud in the SLAM graph as a reference
+ * pose and attempts to align all other point clouds to it simultaneously. <br> Things
+ * to keep in mind: <ul> <li>Only those parts of the graph connected to the reference
+ * pose will properly align to it.</li> <li>All sets of correspondences should span
+ * the same space and need to be sufficient to determine a rigid transformation.</li>
+   * <li>The algorithm draws its strength from loops in the graph because it will
+ * distribute errors evenly amongst those loops.</li>
+ * </ul>
+ * Computation ends when either of the following conditions hold:
+ * <ul>
+ * <li>The number of iterations reaches max_iterations. Use setMaxIterations() to
+   * change.</li> <li>The convergence criterion is met. Use setConvergenceThreshold() to
+ * change.</li>
+ * </ul>
+ * Computation will change the pose estimates for the vertices of the SLAM graph, not
+ * the point clouds attached to them. The results can be retrieved with getPose(),
+ * getTransformation(), getTransformedCloud() or getConcatenatedCloud().
+ */
+ void
+ compute();
+
+ /** \brief Return a point cloud from one of the SLAM graph's vertices compounded onto
+ * its current pose estimate. \note Vertex descriptors are typecastable to int.
+ * \param[in] vertex The vertex descriptor of which to return the transformed point
+ * cloud. \return The transformed point cloud of that vertex.
+ */
+ PointCloudPtr
+ getTransformedCloud(const Vertex& vertex) const;
+
+ /** \brief Return a concatenated point cloud of all the SLAM graph's point clouds
+ * compounded onto their current pose estimates. \return The concatenated transformed
+ * point clouds of the entire SLAM graph.
+ */
+ PointCloudPtr
+ getConcatenatedCloud() const;
+
+protected:
+ /** \brief Linearized computation of C^-1 and C^-1*D (results stored in slam_graph_).
+ */
+ void
+ computeEdge(const Edge& e);
+
+ /** \brief Returns a pose corrected 6DoF incidence matrix. */
+ inline Eigen::Matrix6f
+ incidenceCorrection(const Eigen::Vector6f& pose);
+
+private:
+ /** \brief The internal SLAM graph structure. */
+ SLAMGraphPtr slam_graph_;
+
+ /** \brief The maximum number of iterations for the compute() method. */
+ int max_iterations_;
+
+ /** \brief The convergence threshold for the summed vector lengths of all poses. */
+ float convergence_threshold_;
+};
+} // namespace registration
+} // namespace pcl
#ifdef PCL_NO_PRECOMPILE
#include <pcl/registration/impl/lum.hpp>
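The usage example in the class documentation leaves someAlgo as a placeholder; one possible (not mandated) way to fill it is pcl::registration::CorrespondenceEstimation, sketched here for the 0 -> 1 edge with an illustrative 0.5 m distance cap. The lum object and point type are taken from the example above.

#include <pcl/registration/correspondence_estimation.h>

pcl::registration::CorrespondenceEstimation<pcl::PointXYZ, pcl::PointXYZ> est;
pcl::CorrespondencesPtr corrs_0_to_1 (new pcl::Correspondences);
est.setInputSource (lum.getPointCloud (0));
est.setInputTarget (lum.getPointCloud (1));
est.determineCorrespondences (*corrs_0_to_1, 0.5);  // max distance is data dependent
lum.setCorrespondences (0, 1, corrs_0_to_1);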
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#pragma once
+#include <pcl/common/common.h>
+#include <pcl/registration/registration.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/registration/registration.h>
-#include <pcl/common/common.h>
-
-namespace pcl
-{
- namespace registration
- {
- /** \brief Container for matching candidate consisting of
- *
- * * fitness score value as a result of the matching algorithm
- * * correspondences between source and target data set
- * * transformation matrix calculated based on the correspondences
- *
- */
- struct MatchingCandidate
- {
- /** \brief Constructor. */
- MatchingCandidate () :
- fitness_score (FLT_MAX),
- transformation (Eigen::Matrix4f::Identity ())
- {};
- /** \brief Value constructor. */
- MatchingCandidate (float s, const pcl::Correspondences &c, const Eigen::Matrix4f &m) :
- fitness_score (s),
- correspondences (c),
- transformation (m)
- {};
+namespace pcl {
+namespace registration {
+/** \brief Container for matching candidate consisting of
+ *
+ * * fitness score value as a result of the matching algorithm
+ * * correspondences between source and target data set
+ * * transformation matrix calculated based on the correspondences
+ *
+ */
+struct MatchingCandidate {
+ /** \brief Constructor. */
+ MatchingCandidate()
+ : fitness_score(FLT_MAX), transformation(Eigen::Matrix4f::Identity()){};
- /** \brief Destructor. */
- ~MatchingCandidate ()
- {};
+ /** \brief Value constructor. */
+ MatchingCandidate(float s, const pcl::Correspondences& c, const Eigen::Matrix4f& m)
+ : fitness_score(s), correspondences(c), transformation(m){};
+ /** \brief Destructor. */
+ ~MatchingCandidate(){};
- /** \brief Fitness score of current candidate resulting from matching algorithm. */
- float fitness_score;
+ /** \brief Fitness score of current candidate resulting from matching algorithm. */
+ float fitness_score;
- /** \brief Correspondences between source <-> target. */
- pcl::Correspondences correspondences;
+ /** \brief Correspondences between source <-> target. */
+ pcl::Correspondences correspondences;
- /** \brief Corresponding transformation matrix retrieved using \a corrs. */
- Eigen::Matrix4f transformation;
+ /** \brief Corresponding transformation matrix retrieved using \a corrs. */
+ Eigen::Matrix4f transformation;
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
- using MatchingCandidates = std::vector<MatchingCandidate, Eigen::aligned_allocator<MatchingCandidate> >;
+using MatchingCandidates =
+ std::vector<MatchingCandidate, Eigen::aligned_allocator<MatchingCandidate>>;
- /** \brief Sorting of candidates based on fitness score value. */
- struct by_score
- {
- /** \brief Operator used to sort candidates based on fitness score. */
- bool operator () (MatchingCandidate const &left, MatchingCandidate const &right)
- {
- return (left.fitness_score < right.fitness_score);
- }
- };
+/** \brief Sorting of candidates based on fitness score value. */
+struct by_score {
+ /** \brief Operator used to sort candidates based on fitness score. */
+ bool
+ operator()(MatchingCandidate const& left, MatchingCandidate const& right)
+ {
+ return (left.fitness_score < right.fitness_score);
+ }
+};
- }; // namespace registration
+}; // namespace registration
}; // namespace pcl
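As a small illustration of how by_score is meant to be used (the scores and transforms below are made up), candidates can be ranked with std::sort so the lowest fitness score comes first.

#include <algorithm>

#include <pcl/registration/matching_candidate.h>

pcl::registration::MatchingCandidates candidates;
candidates.emplace_back (0.8f, pcl::Correspondences (), Eigen::Matrix4f::Identity ());
candidates.emplace_back (0.2f, pcl::Correspondences (), Eigen::Matrix4f::Identity ());

std::sort (candidates.begin (), candidates.end (), pcl::registration::by_score ());
// candidates.front () now holds the candidate with the lowest fitness score.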
#pragma once
-#include <pcl/point_cloud.h>
#include <pcl/registration/registration.h>
+#include <pcl/point_cloud.h>
namespace pcl {
- namespace registration {
-
- /** \brief Meta @ref Registration class
- * \note This method accumulates all registered points and becomes more costly with each registered point cloud.
- *
- * This class provides a way to register a stream of clouds where each cloud
- * will be aligned to the conglomerate of all previous clouds.
- *
- * \code
- * IterativeClosestPoint<PointXYZ,PointXYZ>::Ptr icp (new IterativeClosestPoint<PointXYZ,PointXYZ>);
- * icp->setMaxCorrespondenceDistance (0.05);
- * icp->setMaximumIterations (50);
- *
- * MetaRegistration<PointXYZ> mreg;
- * mreg.setRegistration (icp);
- *
- * while (true)
- * {
- * PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
- * read_cloud (*cloud);
- * mreg.registerCloud (cloud);
- *
- * PointCloud<PointXYZ>::Ptr tmp (new PointCloud<PointXYZ>);
- * transformPointCloud (*cloud, *tmp, mreg.getAbsoluteTransform ());
- * write_cloud (*tmp);
- * }
- * \endcode
- *
- * \author Michael 'v4hn' Goerner
- * \ingroup registration
- */
- template <typename PointT, typename Scalar = float>
- class MetaRegistration {
- public:
- using PointCloudPtr = typename pcl::PointCloud<PointT>::Ptr;
- using PointCloudConstPtr = typename pcl::PointCloud<PointT>::ConstPtr;
-
- using RegistrationPtr = typename pcl::Registration<PointT,PointT,Scalar>::Ptr;
- using Matrix4 = typename pcl::Registration<PointT,PointT,Scalar>::Matrix4;
-
- MetaRegistration ();
-
- /** \brief Empty destructor */
- virtual ~MetaRegistration () {};
-
- /** \brief Register new point cloud
- * \note You have to set a valid registration object with \ref setRegistration before using this
- * \param[in] cloud point cloud to register
- * \param[in] delta_estimate estimated transform between last registered cloud and this one
- * \return true if registration converged
- */
- bool
- registerCloud (const PointCloudConstPtr& cloud, const Matrix4& delta_estimate = Matrix4::Identity ());
-
- /** \brief Get estimated transform of the last registered cloud */
- inline Matrix4
- getAbsoluteTransform () const;
-
- /** \brief Reset MetaRegistration without resetting registration_ */
- inline void
- reset ();
-
- /** \brief Set registration instance used to align clouds */
- inline void
- setRegistration (RegistrationPtr);
-
- /** \brief get accumulated meta point cloud */
- inline PointCloudConstPtr
- getMetaCloud () const;
- protected:
-
- /** \brief registered accumulated point cloud */
- PointCloudPtr full_cloud_;
-
- /** \brief registration instance to align clouds */
- RegistrationPtr registration_;
-
- /** \brief estimated transform */
- Matrix4 abs_transform_;
- };
-
- }
-}
+namespace registration {
+
+/** \brief Meta @ref Registration class
+ * \note This method accumulates all registered points and becomes more costly with each
+ * registered point cloud.
+ *
+ * This class provides a way to register a stream of clouds where each cloud
+ * will be aligned to the conglomerate of all previous clouds.
+ *
+ * \code
+ * IterativeClosestPoint<PointXYZ,PointXYZ>::Ptr icp
+ * (new IterativeClosestPoint<PointXYZ,PointXYZ>);
+ * icp->setMaxCorrespondenceDistance (0.05);
+ * icp->setMaximumIterations (50);
+ *
+ * MetaRegistration<PointXYZ> mreg;
+ * mreg.setRegistration (icp);
+ *
+ * while (true)
+ * {
+ * PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
+ * read_cloud (*cloud);
+ * mreg.registerCloud (cloud);
+ *
+ * PointCloud<PointXYZ>::Ptr tmp (new PointCloud<PointXYZ>);
+ * transformPointCloud (*cloud, *tmp, mreg.getAbsoluteTransform ());
+ * write_cloud (*tmp);
+ * }
+ * \endcode
+ *
+ * \author Michael 'v4hn' Goerner
+ * \ingroup registration
+ */
+template <typename PointT, typename Scalar = float>
+class MetaRegistration {
+public:
+ using PointCloudPtr = typename pcl::PointCloud<PointT>::Ptr;
+ using PointCloudConstPtr = typename pcl::PointCloud<PointT>::ConstPtr;
+
+ using RegistrationPtr = typename pcl::Registration<PointT, PointT, Scalar>::Ptr;
+ using Matrix4 = typename pcl::Registration<PointT, PointT, Scalar>::Matrix4;
+
+ MetaRegistration();
+
+ /** \brief Empty destructor */
+ virtual ~MetaRegistration(){};
+
+ /** \brief Register new point cloud
+   * \note You have to set a valid registration object with \ref setRegistration before
+   * using this.
+   * \param[in] cloud point cloud to register
+   * \param[in] delta_estimate estimated transform between last registered cloud and
+   * this one
+   * \return true if registration converged
+ */
+ bool
+ registerCloud(const PointCloudConstPtr& cloud,
+ const Matrix4& delta_estimate = Matrix4::Identity());
+
+ /** \brief Get estimated transform of the last registered cloud */
+ inline Matrix4
+ getAbsoluteTransform() const;
+
+ /** \brief Reset MetaRegistration without resetting registration_ */
+ inline void
+ reset();
+
+ /** \brief Set registration instance used to align clouds */
+ inline void setRegistration(RegistrationPtr);
+
+ /** \brief get accumulated meta point cloud */
+ inline PointCloudConstPtr
+ getMetaCloud() const;
+
+protected:
+ /** \brief registered accumulated point cloud */
+ PointCloudPtr full_cloud_;
+
+ /** \brief registration instance to align clouds */
+ RegistrationPtr registration_;
+
+ /** \brief estimated transform */
+ Matrix4 abs_transform_;
+};
+
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/meta_registration.hpp>
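Complementing the streaming loop in the class documentation, the accumulated model can be read back and the accumulator cleared; a short sketch, reusing the hypothetical mreg instance from the example above:

#include <iostream>

// Inspect the conglomerate cloud built up so far.
pcl::PointCloud<pcl::PointXYZ>::ConstPtr meta = mreg.getMetaCloud ();
std::cout << "meta cloud contains " << meta->size () << " points\n";

// Drop the accumulated data but keep the configured registration instance.
mreg.reset ();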
#pragma once
+#include <pcl/common/utils.h>
+#include <pcl/filters/voxel_grid_covariance.h>
+#include <pcl/registration/registration.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/registration/registration.h>
-#include <pcl/filters/voxel_grid_covariance.h>
#include <unsupported/Eigen/NonLinearOptimization>
-namespace pcl
-{
- /** \brief A 3D Normal Distribution Transform registration implementation for point cloud data.
- * \note For more information please see
- * <b>Magnusson, M. (2009). The Three-Dimensional Normal-Distributions Transform —
- * an Efficient Representation for Registration, Surface Analysis, and Loop Detection.
- * PhD thesis, Orebro University. Orebro Studies in Technology 36.</b>,
- * <b>More, J., and Thuente, D. (1994). Line Search Algorithm with Guaranteed Sufficient Decrease
- * In ACM Transactions on Mathematical Software.</b> and
- * Sun, W. and Yuan, Y, (2006) Optimization Theory and Methods: Nonlinear Programming. 89-100
- * \note Math refactored by Todor Stoyanov.
- * \author Brian Okorn (Space and Naval Warfare Systems Center Pacific)
- */
- template<typename PointSource, typename PointTarget>
- class NormalDistributionsTransform : public Registration<PointSource, PointTarget>
+namespace pcl {
+/** \brief A 3D Normal Distribution Transform registration implementation for point
+ * cloud data. \note For more information please see <b>Magnusson, M. (2009). The
+ * Three-Dimensional Normal-Distributions Transform — an Efficient Representation for
+ * Registration, Surface Analysis, and Loop Detection. PhD thesis, Orebro University.
+ * Orebro Studies in Technology 36.</b>, <b>More, J., and Thuente, D. (1994). Line
+ * Search Algorithm with Guaranteed Sufficient Decrease In ACM Transactions on
+ * Mathematical Software.</b> and Sun, W. and Yuan, Y, (2006) Optimization Theory and
+ * Methods: Nonlinear Programming. 89-100 \note Math refactored by Todor Stoyanov.
+ * \author Brian Okorn (Space and Naval Warfare Systems Center Pacific)
+ */
+template <typename PointSource, typename PointTarget>
+class NormalDistributionsTransform : public Registration<PointSource, PointTarget> {
+protected:
+ using PointCloudSource =
+ typename Registration<PointSource, PointTarget>::PointCloudSource;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget =
+ typename Registration<PointSource, PointTarget>::PointCloudTarget;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+ /** \brief Typename of searchable voxel grid containing mean and covariance. */
+ using TargetGrid = VoxelGridCovariance<PointTarget>;
+ /** \brief Typename of pointer to searchable voxel grid. */
+ using TargetGridPtr = TargetGrid*;
+ /** \brief Typename of const pointer to searchable voxel grid. */
+ using TargetGridConstPtr = const TargetGrid*;
+ /** \brief Typename of const pointer to searchable voxel grid leaf. */
+ using TargetGridLeafConstPtr = typename TargetGrid::LeafConstPtr;
+
+public:
+ using Ptr = shared_ptr<NormalDistributionsTransform<PointSource, PointTarget>>;
+ using ConstPtr =
+ shared_ptr<const NormalDistributionsTransform<PointSource, PointTarget>>;
+
+ /** \brief Constructor.
+ * Sets \ref outlier_ratio_ to 0.35, \ref step_size_ to 0.05 and \ref resolution_
+ * to 1.0
+ */
+ NormalDistributionsTransform();
+
+ /** \brief Empty destructor */
+ ~NormalDistributionsTransform() {}
+
+ /** \brief Provide a pointer to the input target (e.g., the point cloud that we want
+ * to align the input source to). \param[in] cloud the input point cloud target
+ */
+ inline void
+ setInputTarget(const PointCloudTargetConstPtr& cloud) override
{
- protected:
-
- using PointCloudSource = typename Registration<PointSource, PointTarget>::PointCloudSource;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = typename Registration<PointSource, PointTarget>::PointCloudTarget;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- /** \brief Typename of searchable voxel grid containing mean and covariance. */
- using TargetGrid = VoxelGridCovariance<PointTarget>;
- /** \brief Typename of pointer to searchable voxel grid. */
- using TargetGridPtr = TargetGrid *;
- /** \brief Typename of const pointer to searchable voxel grid. */
- using TargetGridConstPtr = const TargetGrid *;
- /** \brief Typename of const pointer to searchable voxel grid leaf. */
- using TargetGridLeafConstPtr = typename TargetGrid::LeafConstPtr;
-
-
- public:
-
- using Ptr = shared_ptr< NormalDistributionsTransform<PointSource, PointTarget> >;
- using ConstPtr = shared_ptr< const NormalDistributionsTransform<PointSource, PointTarget> >;
-
-
- /** \brief Constructor.
- * Sets \ref outlier_ratio_ to 0.35, \ref step_size_ to 0.05 and \ref resolution_ to 1.0
- */
- NormalDistributionsTransform ();
-
- /** \brief Empty destructor */
- ~NormalDistributionsTransform () {}
-
- /** \brief Provide a pointer to the input target (e.g., the point cloud that we want to align the input source to).
- * \param[in] cloud the input point cloud target
- */
- inline void
- setInputTarget (const PointCloudTargetConstPtr &cloud) override
- {
- Registration<PointSource, PointTarget>::setInputTarget (cloud);
- init ();
- }
-
- /** \brief Set/change the voxel grid resolution.
- * \param[in] resolution side length of voxels
- */
- inline void
- setResolution (float resolution)
- {
- // Prevents unnessary voxel initiations
- if (resolution_ != resolution)
- {
- resolution_ = resolution;
- if (input_)
- init ();
- }
- }
-
- /** \brief Get voxel grid resolution.
- * \return side length of voxels
- */
- inline float
- getResolution () const
- {
- return (resolution_);
- }
-
- /** \brief Get the newton line search maximum step length.
- * \return maximum step length
- */
- inline double
- getStepSize () const
- {
- return (step_size_);
- }
-
- /** \brief Set/change the newton line search maximum step length.
- * \param[in] step_size maximum step length
- */
- inline void
- setStepSize (double step_size)
- {
- step_size_ = step_size;
- }
-
- /** \brief Get the point cloud outlier ratio.
- * \return outlier ratio
- */
- inline double
- getOulierRatio () const
- {
- return (outlier_ratio_);
- }
-
- /** \brief Set/change the point cloud outlier ratio.
- * \param[in] outlier_ratio outlier ratio
- */
- inline void
- setOulierRatio (double outlier_ratio)
- {
- outlier_ratio_ = outlier_ratio;
- }
-
- /** \brief Get the registration alignment probability.
- * \return transformation probability
- */
- inline double
- getTransformationProbability () const
- {
- return (trans_probability_);
- }
-
- /** \brief Get the number of iterations required to calculate alignment.
- * \return final number of iterations
- */
- inline int
- getFinalNumIteration () const
- {
- return (nr_iterations_);
- }
-
- /** \brief Convert 6 element transformation vector to affine transformation.
- * \param[in] x transformation vector of the form [x, y, z, roll, pitch, yaw]
- * \param[out] trans affine transform corresponding to given transfomation vector
- */
- static void
- convertTransform (const Eigen::Matrix<double, 6, 1> &x, Eigen::Affine3f &trans)
- {
- trans = Eigen::Translation<float, 3>(float (x (0)), float (x (1)), float (x (2))) *
- Eigen::AngleAxis<float>(float (x (3)), Eigen::Vector3f::UnitX ()) *
- Eigen::AngleAxis<float>(float (x (4)), Eigen::Vector3f::UnitY ()) *
- Eigen::AngleAxis<float>(float (x (5)), Eigen::Vector3f::UnitZ ());
- }
-
- /** \brief Convert 6 element transformation vector to transformation matrix.
- * \param[in] x transformation vector of the form [x, y, z, roll, pitch, yaw]
- * \param[out] trans 4x4 transformation matrix corresponding to given transfomation vector
- */
- static void
- convertTransform (const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix4f &trans)
- {
- Eigen::Affine3f _affine;
- convertTransform (x, _affine);
- trans = _affine.matrix ();
- }
-
- protected:
-
- using Registration<PointSource, PointTarget>::reg_name_;
- using Registration<PointSource, PointTarget>::getClassName;
- using Registration<PointSource, PointTarget>::input_;
- using Registration<PointSource, PointTarget>::indices_;
- using Registration<PointSource, PointTarget>::target_;
- using Registration<PointSource, PointTarget>::nr_iterations_;
- using Registration<PointSource, PointTarget>::max_iterations_;
- using Registration<PointSource, PointTarget>::previous_transformation_;
- using Registration<PointSource, PointTarget>::final_transformation_;
- using Registration<PointSource, PointTarget>::transformation_;
- using Registration<PointSource, PointTarget>::transformation_epsilon_;
- using Registration<PointSource, PointTarget>::transformation_rotation_epsilon_;
- using Registration<PointSource, PointTarget>::converged_;
- using Registration<PointSource, PointTarget>::corr_dist_threshold_;
- using Registration<PointSource, PointTarget>::inlier_threshold_;
-
- using Registration<PointSource, PointTarget>::update_visualizer_;
-
- /** \brief Estimate the transformation and returns the transformed source (input) as output.
- * \param[out] output the resultant input transformed point cloud dataset
- */
- virtual void
- computeTransformation (PointCloudSource &output)
- {
- computeTransformation (output, Eigen::Matrix4f::Identity ());
- }
-
- /** \brief Estimate the transformation and returns the transformed source (input) as output.
- * \param[out] output the resultant input transformed point cloud dataset
- * \param[in] guess the initial gross estimation of the transformation
- */
- void
- computeTransformation (PointCloudSource &output, const Eigen::Matrix4f &guess) override;
-
- /** \brief Initiate covariance voxel structure. */
- void inline
- init ()
- {
- target_cells_.setLeafSize (resolution_, resolution_, resolution_);
- target_cells_.setInputCloud ( target_ );
- // Initiate voxel structure.
- target_cells_.filter (true);
+ Registration<PointSource, PointTarget>::setInputTarget(cloud);
+ init();
+ }
+
+ /** \brief Set/change the voxel grid resolution.
+ * \param[in] resolution side length of voxels
+ */
+ inline void
+ setResolution(float resolution)
+ {
+    // Prevents unnecessary voxel initiations
+ if (resolution_ != resolution) {
+ resolution_ = resolution;
+ if (input_) {
+ init();
}
-
- /** \brief Compute derivatives of probability function w.r.t. the transformation vector.
- * \note Equation 6.10, 6.12 and 6.13 [Magnusson 2009].
- * \param[out] score_gradient the gradient vector of the probability function w.r.t. the transformation vector
- * \param[out] hessian the hessian matrix of the probability function w.r.t. the transformation vector
- * \param[in] trans_cloud transformed point cloud
- * \param[in] p the current transform vector
- * \param[in] compute_hessian flag to calculate hessian, unnessissary for step calculation.
- */
- double
- computeDerivatives (Eigen::Matrix<double, 6, 1> &score_gradient,
- Eigen::Matrix<double, 6, 6> &hessian,
- PointCloudSource &trans_cloud,
- Eigen::Matrix<double, 6, 1> &p,
+ }
+ }
+
+ /** \brief Get voxel grid resolution.
+ * \return side length of voxels
+ */
+ inline float
+ getResolution() const
+ {
+ return resolution_;
+ }
+
+ /** \brief Get the newton line search maximum step length.
+ * \return maximum step length
+ */
+ inline double
+ getStepSize() const
+ {
+ return step_size_;
+ }
+
+ /** \brief Set/change the newton line search maximum step length.
+ * \param[in] step_size maximum step length
+ */
+ inline void
+ setStepSize(double step_size)
+ {
+ step_size_ = step_size;
+ }
+
+ /** \brief Get the point cloud outlier ratio.
+ * \return outlier ratio
+ */
+ inline double
+ getOulierRatio() const
+ {
+ return outlier_ratio_;
+ }
+
+ /** \brief Set/change the point cloud outlier ratio.
+ * \param[in] outlier_ratio outlier ratio
+ */
+ inline void
+ setOulierRatio(double outlier_ratio)
+ {
+ outlier_ratio_ = outlier_ratio;
+ }
+
+ /** \brief Get the registration alignment probability.
+ * \return transformation probability
+ */
+ inline double
+ getTransformationProbability() const
+ {
+ return trans_probability_;
+ }
+
+ /** \brief Get the number of iterations required to calculate alignment.
+ * \return final number of iterations
+ */
+ inline int
+ getFinalNumIteration() const
+ {
+ return nr_iterations_;
+ }
+
+ /** \brief Convert 6 element transformation vector to affine transformation.
+ * \param[in] x transformation vector of the form [x, y, z, roll, pitch, yaw]
+   * \param[out] trans affine transform corresponding to given transformation vector
+ */
+ static void
+ convertTransform(const Eigen::Matrix<double, 6, 1>& x, Eigen::Affine3f& trans)
+ {
+ trans = Eigen::Translation<float, 3>(x.head<3>().cast<float>()) *
+ Eigen::AngleAxis<float>(float(x(3)), Eigen::Vector3f::UnitX()) *
+ Eigen::AngleAxis<float>(float(x(4)), Eigen::Vector3f::UnitY()) *
+ Eigen::AngleAxis<float>(float(x(5)), Eigen::Vector3f::UnitZ());
+ }
+
+ /** \brief Convert 6 element transformation vector to transformation matrix.
+ * \param[in] x transformation vector of the form [x, y, z, roll, pitch, yaw]
+   * \param[out] trans 4x4 transformation matrix corresponding to given transformation
+ * vector
+ */
+ static void
+ convertTransform(const Eigen::Matrix<double, 6, 1>& x, Eigen::Matrix4f& trans)
+ {
+ Eigen::Affine3f _affine;
+ convertTransform(x, _affine);
+ trans = _affine.matrix();
+ }
+
+protected:
+ using Registration<PointSource, PointTarget>::reg_name_;
+ using Registration<PointSource, PointTarget>::getClassName;
+ using Registration<PointSource, PointTarget>::input_;
+ using Registration<PointSource, PointTarget>::indices_;
+ using Registration<PointSource, PointTarget>::target_;
+ using Registration<PointSource, PointTarget>::nr_iterations_;
+ using Registration<PointSource, PointTarget>::max_iterations_;
+ using Registration<PointSource, PointTarget>::previous_transformation_;
+ using Registration<PointSource, PointTarget>::final_transformation_;
+ using Registration<PointSource, PointTarget>::transformation_;
+ using Registration<PointSource, PointTarget>::transformation_epsilon_;
+ using Registration<PointSource, PointTarget>::transformation_rotation_epsilon_;
+ using Registration<PointSource, PointTarget>::converged_;
+ using Registration<PointSource, PointTarget>::corr_dist_threshold_;
+ using Registration<PointSource, PointTarget>::inlier_threshold_;
+
+ using Registration<PointSource, PointTarget>::update_visualizer_;
+
+ /** \brief Estimate the transformation and returns the transformed source (input) as
+ * output. \param[out] output the resultant input transformed point cloud dataset
+ */
+ virtual void
+ computeTransformation(PointCloudSource& output)
+ {
+ computeTransformation(output, Eigen::Matrix4f::Identity());
+ }
+
+ /** \brief Estimate the transformation and returns the transformed source (input) as
+ * output. \param[out] output the resultant input transformed point cloud dataset
+ * \param[in] guess the initial gross estimation of the transformation
+ */
+ void
+ computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess) override;
+
+ /** \brief Initiate covariance voxel structure. */
+ void inline init()
+ {
+ target_cells_.setLeafSize(resolution_, resolution_, resolution_);
+ target_cells_.setInputCloud(target_);
+ // Initiate voxel structure.
+ target_cells_.filter(true);
+ }
+
+ /** \brief Compute derivatives of probability function w.r.t. the transformation
+ * vector. \note Equation 6.10, 6.12 and 6.13 [Magnusson 2009]. \param[out]
+ * score_gradient the gradient vector of the probability function w.r.t. the
+ * transformation vector \param[out] hessian the hessian matrix of the probability
+ * function w.r.t. the transformation vector \param[in] trans_cloud transformed point
+ * cloud \param[in] transform the current transform vector \param[in] compute_hessian
+   * flag to calculate hessian, unnecessary for step calculation.
+ */
+ double
+ computeDerivatives(Eigen::Matrix<double, 6, 1>& score_gradient,
+ Eigen::Matrix<double, 6, 6>& hessian,
+ const PointCloudSource& trans_cloud,
+ const Eigen::Matrix<double, 6, 1>& transform,
+ bool compute_hessian = true);
+
+  /** \brief Compute individual point contributions to derivatives of probability
+ * function w.r.t. the transformation vector. \note Equation 6.10, 6.12 and 6.13
+ * [Magnusson 2009]. \param[in,out] score_gradient the gradient vector of the
+ * probability function w.r.t. the transformation vector \param[in,out] hessian the
+ * hessian matrix of the probability function w.r.t. the transformation vector
+ * \param[in] x_trans transformed point minus mean of occupied covariance voxel
+ * \param[in] c_inv covariance of occupied covariance voxel
+   * \param[in] compute_hessian flag to calculate hessian, unnecessary for step
+ * calculation.
+ */
+ double
+ updateDerivatives(Eigen::Matrix<double, 6, 1>& score_gradient,
+ Eigen::Matrix<double, 6, 6>& hessian,
+ const Eigen::Vector3d& x_trans,
+ const Eigen::Matrix3d& c_inv,
+ bool compute_hessian = true) const;
+
+  /** \brief Precompute angular components of derivatives.
+ * \note Equation 6.19 and 6.21 [Magnusson 2009].
+ * \param[in] transform the current transform vector
+   * \param[in] compute_hessian flag to calculate hessian, unnecessary for step
+ * calculation.
+ */
+ void
+ computeAngleDerivatives(const Eigen::Matrix<double, 6, 1>& transform,
bool compute_hessian = true);
- /** \brief Compute individual point contirbutions to derivatives of probability function w.r.t. the transformation vector.
- * \note Equation 6.10, 6.12 and 6.13 [Magnusson 2009].
- * \param[in,out] score_gradient the gradient vector of the probability function w.r.t. the transformation vector
- * \param[in,out] hessian the hessian matrix of the probability function w.r.t. the transformation vector
- * \param[in] x_trans transformed point minus mean of occupied covariance voxel
- * \param[in] c_inv covariance of occupied covariance voxel
- * \param[in] compute_hessian flag to calculate hessian, unnessissary for step calculation.
- */
- double
- updateDerivatives (Eigen::Matrix<double, 6, 1> &score_gradient,
- Eigen::Matrix<double, 6, 6> &hessian,
- Eigen::Vector3d &x_trans, Eigen::Matrix3d &c_inv,
- bool compute_hessian = true);
-
- /** \brief Precompute anglular components of derivatives.
- * \note Equation 6.19 and 6.21 [Magnusson 2009].
- * \param[in] p the current transform vector
- * \param[in] compute_hessian flag to calculate hessian, unnessissary for step calculation.
- */
- void
- computeAngleDerivatives (Eigen::Matrix<double, 6, 1> &p, bool compute_hessian = true);
-
- /** \brief Compute point derivatives.
- * \note Equation 6.18-21 [Magnusson 2009].
- * \param[in] x point from the input cloud
- * \param[in] compute_hessian flag to calculate hessian, unnessissary for step calculation.
- */
- void
- computePointDerivatives (Eigen::Vector3d &x, bool compute_hessian = true);
-
- /** \brief Compute hessian of probability function w.r.t. the transformation vector.
- * \note Equation 6.13 [Magnusson 2009].
- * \param[out] hessian the hessian matrix of the probability function w.r.t. the transformation vector
- * \param[in] trans_cloud transformed point cloud
- * \param[in] p the current transform vector
- */
- void
- computeHessian (Eigen::Matrix<double, 6, 6> &hessian,
- PointCloudSource &trans_cloud,
- Eigen::Matrix<double, 6, 1> &p);
-
- /** \brief Compute individual point contirbutions to hessian of probability function w.r.t. the transformation vector.
- * \note Equation 6.13 [Magnusson 2009].
- * \param[in,out] hessian the hessian matrix of the probability function w.r.t. the transformation vector
- * \param[in] x_trans transformed point minus mean of occupied covariance voxel
- * \param[in] c_inv covariance of occupied covariance voxel
- */
- void
- updateHessian (Eigen::Matrix<double, 6, 6> &hessian,
- Eigen::Vector3d &x_trans, Eigen::Matrix3d &c_inv);
-
- /** \brief Compute line search step length and update transform and probability derivatives using More-Thuente method.
- * \note Search Algorithm [More, Thuente 1994]
- * \param[in] x initial transformation vector, \f$ x \f$ in Equation 1.3 (Moore, Thuente 1994) and \f$ \vec{p} \f$ in Algorithm 2 [Magnusson 2009]
- * \param[in] step_dir descent direction, \f$ p \f$ in Equation 1.3 (Moore, Thuente 1994) and \f$ \delta \vec{p} \f$ normalized in Algorithm 2 [Magnusson 2009]
- * \param[in] step_init initial step length estimate, \f$ \alpha_0 \f$ in Moore-Thuente (1994) and the noramal of \f$ \delta \vec{p} \f$ in Algorithm 2 [Magnusson 2009]
- * \param[in] step_max maximum step length, \f$ \alpha_max \f$ in Moore-Thuente (1994)
- * \param[in] step_min minimum step length, \f$ \alpha_min \f$ in Moore-Thuente (1994)
- * \param[out] score final score function value, \f$ f(x + \alpha p) \f$ in Equation 1.3 (Moore, Thuente 1994) and \f$ score \f$ in Algorithm 2 [Magnusson 2009]
- * \param[in,out] score_gradient gradient of score function w.r.t. transformation vector, \f$ f'(x + \alpha p) \f$ in Moore-Thuente (1994) and \f$ \vec{g} \f$ in Algorithm 2 [Magnusson 2009]
- * \param[out] hessian hessian of score function w.r.t. transformation vector, \f$ f''(x + \alpha p) \f$ in Moore-Thuente (1994) and \f$ H \f$ in Algorithm 2 [Magnusson 2009]
- * \param[in,out] trans_cloud transformed point cloud, \f$ X \f$ transformed by \f$ T(\vec{p},\vec{x}) \f$ in Algorithm 2 [Magnusson 2009]
- * \return final step length
- */
- double
- computeStepLengthMT (const Eigen::Matrix<double, 6, 1> &x,
- Eigen::Matrix<double, 6, 1> &step_dir,
- double step_init,
- double step_max, double step_min,
- double &score,
- Eigen::Matrix<double, 6, 1> &score_gradient,
- Eigen::Matrix<double, 6, 6> &hessian,
- PointCloudSource &trans_cloud);
-
- /** \brief Update interval of possible step lengths for More-Thuente method, \f$ I \f$ in More-Thuente (1994)
- * \note Updating Algorithm until some value satisfies \f$ \psi(\alpha_k) \leq 0 \f$ and \f$ \phi'(\alpha_k) \geq 0 \f$
- * and Modified Updating Algorithm from then on [More, Thuente 1994].
- * \param[in,out] a_l first endpoint of interval \f$ I \f$, \f$ \alpha_l \f$ in Moore-Thuente (1994)
- * \param[in,out] f_l value at first endpoint, \f$ f_l \f$ in Moore-Thuente (1994), \f$ \psi(\alpha_l) \f$ for Update Algorithm and \f$ \phi(\alpha_l) \f$ for Modified Update Algorithm
- * \param[in,out] g_l derivative at first endpoint, \f$ g_l \f$ in Moore-Thuente (1994), \f$ \psi'(\alpha_l) \f$ for Update Algorithm and \f$ \phi'(\alpha_l) \f$ for Modified Update Algorithm
- * \param[in,out] a_u second endpoint of interval \f$ I \f$, \f$ \alpha_u \f$ in Moore-Thuente (1994)
- * \param[in,out] f_u value at second endpoint, \f$ f_u \f$ in Moore-Thuente (1994), \f$ \psi(\alpha_u) \f$ for Update Algorithm and \f$ \phi(\alpha_u) \f$ for Modified Update Algorithm
- * \param[in,out] g_u derivative at second endpoint, \f$ g_u \f$ in Moore-Thuente (1994), \f$ \psi'(\alpha_u) \f$ for Update Algorithm and \f$ \phi'(\alpha_u) \f$ for Modified Update Algorithm
- * \param[in] a_t trial value, \f$ \alpha_t \f$ in Moore-Thuente (1994)
- * \param[in] f_t value at trial value, \f$ f_t \f$ in Moore-Thuente (1994), \f$ \psi(\alpha_t) \f$ for Update Algorithm and \f$ \phi(\alpha_t) \f$ for Modified Update Algorithm
- * \param[in] g_t derivative at trial value, \f$ g_t \f$ in Moore-Thuente (1994), \f$ \psi'(\alpha_t) \f$ for Update Algorithm and \f$ \phi'(\alpha_t) \f$ for Modified Update Algorithm
- * \return if interval converges
- */
- bool
- updateIntervalMT (double &a_l, double &f_l, double &g_l,
- double &a_u, double &f_u, double &g_u,
- double a_t, double f_t, double g_t);
-
- /** \brief Select new trial value for More-Thuente method.
- * \note Trial Value Selection [More, Thuente 1994], \f$ \psi(\alpha_k) \f$ is used for \f$ f_k \f$ and \f$ g_k \f$
- * until some value satisfies the test \f$ \psi(\alpha_k) \leq 0 \f$ and \f$ \phi'(\alpha_k) \geq 0 \f$
- * then \f$ \phi(\alpha_k) \f$ is used from then on.
- * \note Interpolation Minimizer equations from Optimization Theory and Methods: Nonlinear Programming By Wenyu Sun, Ya-xiang Yuan (89-100).
- * \param[in] a_l first endpoint of interval \f$ I \f$, \f$ \alpha_l \f$ in Moore-Thuente (1994)
- * \param[in] f_l value at first endpoint, \f$ f_l \f$ in Moore-Thuente (1994)
- * \param[in] g_l derivative at first endpoint, \f$ g_l \f$ in Moore-Thuente (1994)
- * \param[in] a_u second endpoint of interval \f$ I \f$, \f$ \alpha_u \f$ in Moore-Thuente (1994)
- * \param[in] f_u value at second endpoint, \f$ f_u \f$ in Moore-Thuente (1994)
- * \param[in] g_u derivative at second endpoint, \f$ g_u \f$ in Moore-Thuente (1994)
- * \param[in] a_t previous trial value, \f$ \alpha_t \f$ in Moore-Thuente (1994)
- * \param[in] f_t value at previous trial value, \f$ f_t \f$ in Moore-Thuente (1994)
- * \param[in] g_t derivative at previous trial value, \f$ g_t \f$ in Moore-Thuente (1994)
- * \return new trial value
- */
- double
- trialValueSelectionMT (double a_l, double f_l, double g_l,
- double a_u, double f_u, double g_u,
- double a_t, double f_t, double g_t);
-
- /** \brief Auxiliary function used to determine endpoints of More-Thuente interval.
- * \note \f$ \psi(\alpha) \f$ in Equation 1.6 (Moore, Thuente 1994)
- * \param[in] a the step length, \f$ \alpha \f$ in More-Thuente (1994)
- * \param[in] f_a function value at step length a, \f$ \phi(\alpha) \f$ in More-Thuente (1994)
- * \param[in] f_0 initial function value, \f$ \phi(0) \f$ in Moore-Thuente (1994)
- * \param[in] g_0 initial function gradiant, \f$ \phi'(0) \f$ in More-Thuente (1994)
- * \param[in] mu the step length, constant \f$ \mu \f$ in Equation 1.1 [More, Thuente 1994]
- * \return sufficient decrease value
- */
- inline double
- auxilaryFunction_PsiMT (double a, double f_a, double f_0, double g_0, double mu = 1.e-4)
- {
- return (f_a - f_0 - mu * g_0 * a);
- }
-
- /** \brief Auxiliary function derivative used to determine endpoints of More-Thuente interval.
- * \note \f$ \psi'(\alpha) \f$, derivative of Equation 1.6 (Moore, Thuente 1994)
- * \param[in] g_a function gradient at step length a, \f$ \phi'(\alpha) \f$ in More-Thuente (1994)
- * \param[in] g_0 initial function gradiant, \f$ \phi'(0) \f$ in More-Thuente (1994)
- * \param[in] mu the step length, constant \f$ \mu \f$ in Equation 1.1 [More, Thuente 1994]
- * \return sufficient decrease derivative
- */
- inline double
- auxilaryFunction_dPsiMT (double g_a, double g_0, double mu = 1.e-4)
- {
- return (g_a - mu * g_0);
- }
-
- /** \brief The voxel grid generated from target cloud containing point means and covariances. */
- TargetGrid target_cells_;
-
- //double fitness_epsilon_;
-
- /** \brief The side length of voxels. */
- float resolution_;
-
- /** \brief The maximum step length. */
- double step_size_;
-
- /** \brief The ratio of outliers of points w.r.t. a normal distribution, Equation 6.7 [Magnusson 2009]. */
- double outlier_ratio_;
-
- /** \brief The normalization constants used fit the point distribution to a normal distribution, Equation 6.8 [Magnusson 2009]. */
- double gauss_d1_, gauss_d2_;
-
- /** \brief The probability score of the transform applied to the input cloud, Equation 6.9 and 6.10 [Magnusson 2009]. */
- double trans_probability_;
-
- /** \brief Precomputed Angular Gradient
- *
- * The precomputed angular derivatives for the jacobian of a transformation vector, Equation 6.19 [Magnusson 2009].
- */
- Eigen::Vector3d j_ang_a_, j_ang_b_, j_ang_c_, j_ang_d_, j_ang_e_, j_ang_f_, j_ang_g_, j_ang_h_;
-
- /** \brief Precomputed Angular Hessian
- *
- * The precomputed angular derivatives for the hessian of a transformation vector, Equation 6.19 [Magnusson 2009].
- */
- Eigen::Vector3d h_ang_a2_, h_ang_a3_,
- h_ang_b2_, h_ang_b3_,
- h_ang_c2_, h_ang_c3_,
- h_ang_d1_, h_ang_d2_, h_ang_d3_,
- h_ang_e1_, h_ang_e2_, h_ang_e3_,
- h_ang_f1_, h_ang_f2_, h_ang_f3_;
-
- /** \brief The first order derivative of the transformation of a point w.r.t. the transform vector, \f$ J_E \f$ in Equation 6.18 [Magnusson 2009]. */
- Eigen::Matrix<double, 3, 6> point_gradient_;
-
- /** \brief The second order derivative of the transformation of a point w.r.t. the transform vector, \f$ H_E \f$ in Equation 6.20 [Magnusson 2009]. */
- Eigen::Matrix<double, 18, 6> point_hessian_;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
-}
+ /** \brief Compute point derivatives.
+ * \note Equation 6.18-21 [Magnusson 2009].
+ * \param[in] x point from the input cloud
+ * \param[in] compute_hessian flag to calculate hessian, unnecessary for step
+ * calculation.
+ */
+ void
+ computePointDerivatives(const Eigen::Vector3d& x, bool compute_hessian = true);
+
+ /** \brief Compute hessian of probability function w.r.t. the transformation vector.
+ * \note Equation 6.13 [Magnusson 2009].
+ * \param[out] hessian the hessian matrix of the probability function w.r.t. the
+ * transformation vector
+ * \param[in] trans_cloud transformed point cloud
+ */
+ void
+ computeHessian(Eigen::Matrix<double, 6, 6>& hessian,
+ const PointCloudSource& trans_cloud);
+
+ /** \brief Compute hessian of probability function w.r.t. the transformation vector.
+ * \note Equation 6.13 [Magnusson 2009].
+ * \param[out] hessian the hessian matrix of the probability function w.r.t. the
+ * transformation vector
+ * \param[in] trans_cloud transformed point cloud
+ * \param[in] transform the current transform vector
+ */
+ PCL_DEPRECATED(1, 15, "Parameter `transform` is not required")
+ void
+ computeHessian(Eigen::Matrix<double, 6, 6>& hessian,
+ const PointCloudSource& trans_cloud,
+ const Eigen::Matrix<double, 6, 1>& transform)
+ {
+ pcl::utils::ignore(transform);
+ computeHessian(hessian, trans_cloud);
+ }
+
+ /** \brief Compute individual point contributions to hessian of probability function
+ * w.r.t. the transformation vector.
+ * \note Equation 6.13 [Magnusson 2009].
+ * \param[in,out] hessian the hessian matrix of the probability function w.r.t. the
+ * transformation vector
+ * \param[in] x_trans transformed point minus mean of occupied covariance voxel
+ * \param[in] c_inv covariance of occupied covariance voxel
+ */
+ void
+ updateHessian(Eigen::Matrix<double, 6, 6>& hessian,
+ const Eigen::Vector3d& x_trans,
+ const Eigen::Matrix3d& c_inv) const;
+
+ /** \brief Compute line search step length and update transform and probability
+ * derivatives using the More-Thuente method.
+ * \note Search Algorithm [More, Thuente 1994]
+ * \param[in] transform initial transformation vector, \f$ x \f$ in Equation 1.3
+ * [More, Thuente 1994] and \f$ \vec{p} \f$ in Algorithm 2 [Magnusson 2009]
+ * \param[in] step_dir descent direction, \f$ p \f$ in Equation 1.3 [More, Thuente
+ * 1994] and \f$ \delta \vec{p} \f$ normalized in Algorithm 2 [Magnusson 2009]
+ * \param[in] step_init initial step length estimate, \f$ \alpha_0 \f$ in
+ * [More, Thuente 1994] and the norm of \f$ \delta \vec{p} \f$ in Algorithm 2
+ * [Magnusson 2009]
+ * \param[in] step_max maximum step length, \f$ \alpha_{max} \f$ in [More, Thuente 1994]
+ * \param[in] step_min minimum step length, \f$ \alpha_{min} \f$ in [More, Thuente 1994]
+ * \param[out] score final score function value, \f$ f(x + \alpha p) \f$ in Equation
+ * 1.3 [More, Thuente 1994] and \f$ score \f$ in Algorithm 2 [Magnusson 2009]
+ * \param[in,out] score_gradient gradient of score function w.r.t. transformation
+ * vector, \f$ f'(x + \alpha p) \f$ in [More, Thuente 1994] and \f$ \vec{g} \f$ in
+ * Algorithm 2 [Magnusson 2009]
+ * \param[out] hessian hessian of score function w.r.t. transformation vector,
+ * \f$ f''(x + \alpha p) \f$ in [More, Thuente 1994] and \f$ H \f$ in Algorithm 2
+ * [Magnusson 2009]
+ * \param[in,out] trans_cloud transformed point cloud, \f$ X \f$ transformed by
+ * \f$ T(\vec{p},\vec{x}) \f$ in Algorithm 2 [Magnusson 2009]
+ * \return final step length
+ */
+ double
+ computeStepLengthMT(const Eigen::Matrix<double, 6, 1>& transform,
+ Eigen::Matrix<double, 6, 1>& step_dir,
+ double step_init,
+ double step_max,
+ double step_min,
+ double& score,
+ Eigen::Matrix<double, 6, 1>& score_gradient,
+ Eigen::Matrix<double, 6, 6>& hessian,
+ PointCloudSource& trans_cloud);
+
+ /** \brief Update interval of possible step lengths for More-Thuente method, \f$ I \f$
+ * in [More, Thuente 1994].
+ * \note Updating Algorithm until some value satisfies \f$ \psi(\alpha_k) \leq 0 \f$
+ * and \f$ \phi'(\alpha_k) \geq 0 \f$, and Modified Updating Algorithm from then on
+ * [More, Thuente 1994].
+ * \param[in,out] a_l first endpoint of interval \f$ I \f$, \f$ \alpha_l \f$ in
+ * [More, Thuente 1994]
+ * \param[in,out] f_l value at first endpoint, \f$ f_l \f$ in [More, Thuente 1994],
+ * \f$ \psi(\alpha_l) \f$ for Update Algorithm and \f$ \phi(\alpha_l) \f$ for Modified
+ * Update Algorithm
+ * \param[in,out] g_l derivative at first endpoint, \f$ g_l \f$ in [More, Thuente
+ * 1994], \f$ \psi'(\alpha_l) \f$ for Update Algorithm and \f$ \phi'(\alpha_l) \f$ for
+ * Modified Update Algorithm
+ * \param[in,out] a_u second endpoint of interval \f$ I \f$, \f$ \alpha_u \f$ in
+ * [More, Thuente 1994]
+ * \param[in,out] f_u value at second endpoint, \f$ f_u \f$ in [More, Thuente 1994],
+ * \f$ \psi(\alpha_u) \f$ for Update Algorithm and \f$ \phi(\alpha_u) \f$ for Modified
+ * Update Algorithm
+ * \param[in,out] g_u derivative at second endpoint, \f$ g_u \f$ in [More, Thuente
+ * 1994], \f$ \psi'(\alpha_u) \f$ for Update Algorithm and \f$ \phi'(\alpha_u) \f$ for
+ * Modified Update Algorithm
+ * \param[in] a_t trial value, \f$ \alpha_t \f$ in [More, Thuente 1994]
+ * \param[in] f_t value at trial value, \f$ f_t \f$ in [More, Thuente 1994],
+ * \f$ \psi(\alpha_t) \f$ for Update Algorithm and \f$ \phi(\alpha_t) \f$ for Modified
+ * Update Algorithm
+ * \param[in] g_t derivative at trial value, \f$ g_t \f$ in [More, Thuente 1994],
+ * \f$ \psi'(\alpha_t) \f$ for Update Algorithm and \f$ \phi'(\alpha_t) \f$ for
+ * Modified Update Algorithm
+ * \return if interval converges
+ */
+ bool
+ updateIntervalMT(double& a_l,
+ double& f_l,
+ double& g_l,
+ double& a_u,
+ double& f_u,
+ double& g_u,
+ double a_t,
+ double f_t,
+ double g_t) const;
+
+ /** \brief Select new trial value for More-Thuente method.
+ * \note Trial Value Selection [More, Thuente 1994]: \f$ \psi(\alpha_k) \f$ is used
+ * for \f$ f_k \f$ and \f$ g_k \f$ until some value satisfies the test
+ * \f$ \psi(\alpha_k) \leq 0 \f$ and \f$ \phi'(\alpha_k) \geq 0 \f$, then
+ * \f$ \phi(\alpha_k) \f$ is used from then on.
+ * \note Interpolation Minimizer equations from Optimization Theory and Methods:
+ * Nonlinear Programming by Wenyu Sun, Ya-xiang Yuan (89-100).
+ * \param[in] a_l first endpoint of interval \f$ I \f$, \f$ \alpha_l \f$ in
+ * [More, Thuente 1994]
+ * \param[in] f_l value at first endpoint, \f$ f_l \f$ in [More, Thuente 1994]
+ * \param[in] g_l derivative at first endpoint, \f$ g_l \f$ in [More, Thuente 1994]
+ * \param[in] a_u second endpoint of interval \f$ I \f$, \f$ \alpha_u \f$ in
+ * [More, Thuente 1994]
+ * \param[in] f_u value at second endpoint, \f$ f_u \f$ in [More, Thuente 1994]
+ * \param[in] g_u derivative at second endpoint, \f$ g_u \f$ in [More, Thuente 1994]
+ * \param[in] a_t previous trial value, \f$ \alpha_t \f$ in [More, Thuente 1994]
+ * \param[in] f_t value at previous trial value, \f$ f_t \f$ in [More, Thuente 1994]
+ * \param[in] g_t derivative at previous trial value, \f$ g_t \f$ in
+ * [More, Thuente 1994]
+ * \return new trial value
+ */
+ double
+ trialValueSelectionMT(double a_l,
+ double f_l,
+ double g_l,
+ double a_u,
+ double f_u,
+ double g_u,
+ double a_t,
+ double f_t,
+ double g_t) const;
+
+ /** \brief Auxiliary function used to determine endpoints of More-Thuente interval.
+ * \note \f$ \psi(\alpha) \f$ in Equation 1.6 [More, Thuente 1994]
+ * \param[in] a the step length, \f$ \alpha \f$ in [More, Thuente 1994]
+ * \param[in] f_a function value at step length a, \f$ \phi(\alpha) \f$ in
+ * [More, Thuente 1994]
+ * \param[in] f_0 initial function value, \f$ \phi(0) \f$ in [More, Thuente 1994]
+ * \param[in] g_0 initial function gradient, \f$ \phi'(0) \f$ in [More, Thuente 1994]
+ * \param[in] mu the sufficient decrease constant \f$ \mu \f$ in Equation 1.1
+ * [More, Thuente 1994]
+ * \return sufficient decrease value
+ */
+ inline double
+ auxilaryFunction_PsiMT(
+ double a, double f_a, double f_0, double g_0, double mu = 1.e-4) const
+ {
+ return f_a - f_0 - mu * g_0 * a;
+ }
+
+ /** \brief Auxiliary function derivative used to determine endpoints of More-Thuente
+ * interval.
+ * \note \f$ \psi'(\alpha) \f$, derivative of Equation 1.6 [More, Thuente 1994]
+ * \param[in] g_a function gradient at step length a, \f$ \phi'(\alpha) \f$ in
+ * [More, Thuente 1994]
+ * \param[in] g_0 initial function gradient, \f$ \phi'(0) \f$ in [More, Thuente 1994]
+ * \param[in] mu the sufficient decrease constant \f$ \mu \f$ in Equation 1.1
+ * [More, Thuente 1994]
+ * \return sufficient decrease derivative
+ */
+ inline double
+ auxilaryFunction_dPsiMT(double g_a, double g_0, double mu = 1.e-4) const
+ {
+ return g_a - mu * g_0;
+ }
+
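A brief aside on the two inline helpers above (a restatement of [More, Thuente 1994], not text that appears in this header): testing \f$ \psi(\alpha_t) \le 0 \f$ during the line search is exactly the sufficient decrease (Armijo) condition, because

\psi(\alpha) = \phi(\alpha) - \phi(0) - \mu\,\phi'(0)\,\alpha \le 0
  \iff \phi(\alpha) \le \phi(0) + \mu\,\alpha\,\phi'(0),
\qquad
\psi'(\alpha) = \phi'(\alpha) - \mu\,\phi'(0),

where \f$ \phi(\alpha) \f$ is the NDT score along the search direction and \f$ \mu \f$ defaults to \f$ 10^{-4} \f$.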
+ /** \brief The voxel grid generated from target cloud containing point means and
+ * covariances. */
+ TargetGrid target_cells_;
+
+ /** \brief The side length of voxels. */
+ float resolution_;
+
+ /** \brief The maximum step length. */
+ double step_size_;
+
+ /** \brief The ratio of outliers of points w.r.t. a normal distribution, Equation 6.7
+ * [Magnusson 2009]. */
+ double outlier_ratio_;
+
+ /** \brief The normalization constants used to fit the point distribution to a normal
+ * distribution, Equation 6.8 [Magnusson 2009]. */
+ double gauss_d1_, gauss_d2_;
+
+ /** \brief The probability score of the transform applied to the input cloud,
+ * Equation 6.9 and 6.10 [Magnusson 2009]. */
+ double trans_probability_;
+
+ /** \brief Precomputed Angular Gradient
+ *
+ * The precomputed angular derivatives for the jacobian of a transformation vector,
+ * Equation 6.19 [Magnusson 2009].
+ */
+ Eigen::Matrix<double, 8, 4> angular_jacobian_;
+
+ /** \brief Precomputed Angular Hessian
+ *
+ * The precomputed angular derivatives for the hessian of a transformation vector,
+ * Equation 6.19 [Magnusson 2009].
+ */
+ Eigen::Matrix<double, 15, 4> angular_hessian_;
+
+ /** \brief The first order derivative of the transformation of a point w.r.t. the
+ * transform vector, \f$ J_E \f$ in Equation 6.18 [Magnusson 2009]. */
+ Eigen::Matrix<double, 3, 6> point_jacobian_;
+
+ /** \brief The second order derivative of the transformation of a point w.r.t. the
+ * transform vector, \f$ H_E \f$ in Equation 6.20 [Magnusson 2009]. */
+ Eigen::Matrix<double, 18, 6> point_hessian_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace pcl
#include <pcl/registration/impl/ndt.hpp>
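For orientation, a minimal usage sketch of the 3-D NDT class declared above. The cloud variables are hypothetical, the parameter values are illustrative only, and only members declared in this header or inherited from pcl::Registration are assumed:

#include <iostream>

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/ndt.h>

// Hypothetical helper: align `source` to `target` with 3-D NDT and return the
// estimated rigid transformation.
Eigen::Matrix4f
alignWithNDT(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
             const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target)
{
  pcl::NormalDistributionsTransform<pcl::PointXYZ, pcl::PointXYZ> ndt;
  ndt.setResolution(1.0f);            // voxel side length (resolution_)
  ndt.setStepSize(0.1);               // maximum More-Thuente step length (step_size_)
  ndt.setTransformationEpsilon(0.01); // convergence threshold on the transform
  ndt.setMaximumIterations(35);
  ndt.setInputSource(source);
  ndt.setInputTarget(target);

  pcl::PointCloud<pcl::PointXYZ> aligned;
  ndt.align(aligned, Eigen::Matrix4f::Identity()); // identity as the initial guess
  if (!ndt.hasConverged())
    std::cerr << "NDT did not converge within the iteration limit\n";
  return ndt.getFinalTransformation();
}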
#pragma once
-#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include <pcl/registration/registration.h>
+#include <pcl/memory.h>
+
+namespace pcl {
+/** \brief @b NormalDistributionsTransform2D provides an implementation of the
+ * Normal Distributions Transform algorithm for scan matching.
+ *
+ * This implementation is intended to match the definition:
+ * Peter Biber and Wolfgang Straßer. The normal distributions transform: A
+ * new approach to laser scan matching. In Proceedings of the IEEE
+ * International Conference on Intelligent Robots and Systems (IROS), pages
+ * 2743–2748, Las Vegas, USA, October 2003.
+ *
+ * \author James Crosby
+ */
+template <typename PointSource, typename PointTarget>
+class NormalDistributionsTransform2D : public Registration<PointSource, PointTarget> {
+ using PointCloudSource =
+ typename Registration<PointSource, PointTarget>::PointCloudSource;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget =
+ typename Registration<PointSource, PointTarget>::PointCloudTarget;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+public:
+ using Ptr = shared_ptr<NormalDistributionsTransform2D<PointSource, PointTarget>>;
+ using ConstPtr =
+ shared_ptr<const NormalDistributionsTransform2D<PointSource, PointTarget>>;
+
+ /** \brief Empty constructor. */
+ NormalDistributionsTransform2D()
+ : Registration<PointSource, PointTarget>()
+ , grid_centre_(0, 0)
+ , grid_step_(1, 1)
+ , grid_extent_(20, 20)
+ , newton_lambda_(1, 1, 1)
+ {
+ reg_name_ = "NormalDistributionsTransform2D";
+ }
+
+ /** \brief Empty destructor */
+ ~NormalDistributionsTransform2D() {}
-namespace pcl
-{
- /** \brief @b NormalDistributionsTransform2D provides an implementation of the
- * Normal Distributions Transform algorithm for scan matching.
- *
- * This implementation is intended to match the definition:
- * Peter Biber and Wolfgang Straßer. The normal distributions transform: A
- * new approach to laser scan matching. In Proceedings of the IEEE In-
- * ternational Conference on Intelligent Robots and Systems (IROS), pages
- * 2743–2748, Las Vegas, USA, October 2003.
- *
- * \author James Crosby
- */
- template <typename PointSource, typename PointTarget>
- class NormalDistributionsTransform2D : public Registration<PointSource, PointTarget>
+ /** \brief Centre of the NDT grid (target coordinate system)
+ * \param[in] centre value to set
+ */
+ virtual void
+ setGridCentre(const Eigen::Vector2f& centre)
+ {
+ grid_centre_ = centre;
+ }
+
+ /** \brief Grid spacing (step) of the NDT grid
+ * \param[in] step value to set
+ */
+ virtual void
+ setGridStep(const Eigen::Vector2f& step)
+ {
+ grid_step_ = step;
+ }
+
+ /** \brief NDT Grid extent (in either direction from the grid centre)
+ * \param[in] extent value to set
+ */
+ virtual void
+ setGridExtent(const Eigen::Vector2f& extent)
+ {
+ grid_extent_ = extent;
+ }
+
+ /** \brief NDT Newton optimisation step size parameter
+ * \param[in] lambda step size: 1 is simple newton optimisation, smaller values may
+ * improve convergence
+ */
+ virtual void
+ setOptimizationStepSize(const double& lambda)
+ {
+ newton_lambda_ = Eigen::Vector3d(lambda, lambda, lambda);
+ }
+
+ /** \brief NDT Newton optimisation step size parameter
+ * \param[in] lambda step size: (1,1,1) is simple newton optimisation,
+ * smaller values may improve convergence, or elements may be set to
+ * zero to prevent optimisation over some parameters
+ *
+ * This overload allows control of updates to the individual (x, y,
+ * theta) free parameters in the optimisation. If, for example, theta is
+ * believed to be close to the correct value a small value of lambda[2]
+ * should be used.
+ */
+ virtual void
+ setOptimizationStepSize(const Eigen::Vector3d& lambda)
{
- using PointCloudSource = typename Registration<PointSource, PointTarget>::PointCloudSource;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = typename Registration<PointSource, PointTarget>::PointCloudTarget;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- public:
-
- using Ptr = shared_ptr< NormalDistributionsTransform2D<PointSource, PointTarget> >;
- using ConstPtr = shared_ptr< const NormalDistributionsTransform2D<PointSource, PointTarget> >;
-
- /** \brief Empty constructor. */
- NormalDistributionsTransform2D ()
- : Registration<PointSource,PointTarget> (),
- grid_centre_ (0,0), grid_step_ (1,1), grid_extent_ (20,20), newton_lambda_ (1,1,1)
- {
- reg_name_ = "NormalDistributionsTransform2D";
- }
-
- /** \brief Empty destructor */
- ~NormalDistributionsTransform2D () {}
-
- /** \brief centre of the ndt grid (target coordinate system)
- * \param centre value to set
- */
- virtual void
- setGridCentre (const Eigen::Vector2f& centre) { grid_centre_ = centre; }
-
- /** \brief Grid spacing (step) of the NDT grid
- * \param[in] step value to set
- */
- virtual void
- setGridStep (const Eigen::Vector2f& step) { grid_step_ = step; }
-
- /** \brief NDT Grid extent (in either direction from the grid centre)
- * \param[in] extent value to set
- */
- virtual void
- setGridExtent (const Eigen::Vector2f& extent) { grid_extent_ = extent; }
-
- /** \brief NDT Newton optimisation step size parameter
- * \param[in] lambda step size: 1 is simple newton optimisation, smaller values may improve convergence
- */
- virtual void
- setOptimizationStepSize (const double& lambda) { newton_lambda_ = Eigen::Vector3d (lambda, lambda, lambda); }
-
- /** \brief NDT Newton optimisation step size parameter
- * \param[in] lambda step size: (1,1,1) is simple newton optimisation,
- * smaller values may improve convergence, or elements may be set to
- * zero to prevent optimisation over some parameters
- *
- * This overload allows control of updates to the individual (x, y,
- * theta) free parameters in the optimisation. If, for example, theta is
- * believed to be close to the correct value a small value of lambda[2]
- * should be used.
- */
- virtual void
- setOptimizationStepSize (const Eigen::Vector3d& lambda) { newton_lambda_ = lambda; }
-
- protected:
- /** \brief Rigid transformation computation method with initial guess.
- * \param[out] output the transformed input point cloud dataset using the rigid transformation found
- * \param[in] guess the initial guess of the transformation to compute
- */
- void
- computeTransformation (PointCloudSource &output, const Eigen::Matrix4f &guess) override;
-
- using Registration<PointSource, PointTarget>::reg_name_;
- using Registration<PointSource, PointTarget>::target_;
- using Registration<PointSource, PointTarget>::converged_;
- using Registration<PointSource, PointTarget>::nr_iterations_;
- using Registration<PointSource, PointTarget>::max_iterations_;
- using Registration<PointSource, PointTarget>::transformation_epsilon_;
- using Registration<PointSource, PointTarget>::transformation_rotation_epsilon_;
- using Registration<PointSource, PointTarget>::transformation_;
- using Registration<PointSource, PointTarget>::previous_transformation_;
- using Registration<PointSource, PointTarget>::final_transformation_;
- using Registration<PointSource, PointTarget>::update_visualizer_;
- using Registration<PointSource, PointTarget>::indices_;
-
- Eigen::Vector2f grid_centre_;
- Eigen::Vector2f grid_step_;
- Eigen::Vector2f grid_extent_;
- Eigen::Vector3d newton_lambda_;
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ newton_lambda_ = lambda;
+ }
+
+protected:
+ /** \brief Rigid transformation computation method with initial guess.
+ * \param[out] output the transformed input point cloud dataset using the rigid
+ * transformation found
+ * \param[in] guess the initial guess of the transformation to compute
+ */
+ void
+ computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess) override;
+
+ using Registration<PointSource, PointTarget>::reg_name_;
+ using Registration<PointSource, PointTarget>::target_;
+ using Registration<PointSource, PointTarget>::converged_;
+ using Registration<PointSource, PointTarget>::nr_iterations_;
+ using Registration<PointSource, PointTarget>::max_iterations_;
+ using Registration<PointSource, PointTarget>::transformation_epsilon_;
+ using Registration<PointSource, PointTarget>::transformation_rotation_epsilon_;
+ using Registration<PointSource, PointTarget>::transformation_;
+ using Registration<PointSource, PointTarget>::previous_transformation_;
+ using Registration<PointSource, PointTarget>::final_transformation_;
+ using Registration<PointSource, PointTarget>::update_visualizer_;
+ using Registration<PointSource, PointTarget>::indices_;
+
+ Eigen::Vector2f grid_centre_;
+ Eigen::Vector2f grid_step_;
+ Eigen::Vector2f grid_extent_;
+ Eigen::Vector3d newton_lambda_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
} // namespace pcl
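A minimal usage sketch of NormalDistributionsTransform2D, assuming only the setters declared above plus the usual pcl::Registration members; the grid sizes and the step-size vector are illustrative values, not defaults taken from this header:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/ndt_2d.h>

// Hypothetical helper: 2-D scan matching in the x/y plane (x, y, theta are optimised).
void
matchScan2D(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& scan,
            const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& map,
            pcl::PointCloud<pcl::PointXYZ>& scan_aligned)
{
  pcl::NormalDistributionsTransform2D<pcl::PointXYZ, pcl::PointXYZ> ndt2d;
  ndt2d.setGridCentre(Eigen::Vector2f(0.0f, 0.0f));
  ndt2d.setGridStep(Eigen::Vector2f(1.0f, 1.0f));
  ndt2d.setGridExtent(Eigen::Vector2f(20.0f, 20.0f));
  ndt2d.setOptimizationStepSize(Eigen::Vector3d(0.5, 0.5, 0.1)); // damp the theta update
  ndt2d.setMaximumIterations(50);
  ndt2d.setTransformationEpsilon(1e-4);
  ndt2d.setInputSource(scan);
  ndt2d.setInputTarget(map);
  ndt2d.align(scan_aligned); // starts from the identity when no guess is given
}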
#include <pcl/registration/graph_registration.h>
#include <pcl/registration/registration.h>
-namespace pcl
-{
- /** \brief @b PairwiseGraphRegistration class aligns the clouds two by two
- * \author Nicola Fioraio
- * \ingroup registration
- */
- template <typename GraphT, typename PointT>
- class PairwiseGraphRegistration : public GraphRegistration<GraphT>
- {
- public:
- using GraphRegistration<GraphT>::graph_handler_;
- using GraphRegistration<GraphT>::last_aligned_vertex_;
- using GraphRegistration<GraphT>::last_vertices_;
-
- using RegistrationPtr = typename Registration<PointT, PointT>::Ptr;
- using GraphHandlerVertex = typename pcl::registration::GraphHandler<GraphT>::Vertex;
+namespace pcl {
+/** \brief @b PairwiseGraphRegistration class aligns the clouds two by two
+ * \author Nicola Fioraio
+ * \ingroup registration
+ */
+template <typename GraphT, typename PointT>
+class PairwiseGraphRegistration : public GraphRegistration<GraphT> {
+public:
+ using GraphRegistration<GraphT>::graph_handler_;
+ using GraphRegistration<GraphT>::last_aligned_vertex_;
+ using GraphRegistration<GraphT>::last_vertices_;
- /** \brief Empty destructor */
- virtual ~PairwiseGraphRegistration () {}
+ using RegistrationPtr = typename Registration<PointT, PointT>::Ptr;
+ using GraphHandlerVertex = typename pcl::registration::GraphHandler<GraphT>::Vertex;
+ /** \brief Empty destructor */
+ virtual ~PairwiseGraphRegistration() {}
- /** \brief Empty constructor */
- PairwiseGraphRegistration () : registration_method_ (), incremental_ (true)
- {}
- /** \brief Constructor */
- PairwiseGraphRegistration (const RegistrationPtr& reg, bool incremental) : registration_method_ (reg), incremental_ (incremental)
- {}
+ /** \brief Empty constructor */
+ PairwiseGraphRegistration() : registration_method_(), incremental_(true) {}
+ /** \brief Constructor */
+ PairwiseGraphRegistration(const RegistrationPtr& reg, bool incremental)
+ : registration_method_(reg), incremental_(incremental)
+ {}
- /** \brief Set the registration object */
- inline void
- setRegistrationMethod (const RegistrationPtr& reg)
- {
- registration_method_ = reg;
- }
+ /** \brief Set the registration object */
+ inline void
+ setRegistrationMethod(const RegistrationPtr& reg)
+ {
+ registration_method_ = reg;
+ }
- /** \brief Get the registration object */
- inline RegistrationPtr
- getRegistrationMethod ()
- {
- return registration_method_;
- }
+ /** \brief Get the registration object */
+ inline RegistrationPtr
+ getRegistrationMethod()
+ {
+ return registration_method_;
+ }
- /** \brief If True the initial transformation is always set to the Identity */
- inline void
- setIncremental (bool incremental)
- {
- incremental_ = incremental;
- }
+ /** \brief If true, the initial transformation is always set to the identity */
+ inline void
+ setIncremental(bool incremental)
+ {
+ incremental_ = incremental;
+ }
- /** \brief Is incremental ? */
- inline bool
- isIncremental () const
- {
- return incremental_;
- }
+ /** \brief Is incremental? */
+ inline bool
+ isIncremental() const
+ {
+ return incremental_;
+ }
- protected:
- /** \brief The registration object */
- RegistrationPtr registration_method_;
- /** \brief If True the initial transformation is always set to the Identity */
- bool incremental_;
+protected:
+ /** \brief The registration object */
+ RegistrationPtr registration_method_;
+ /** \brief If true, the initial transformation is always set to the identity */
+ bool incremental_;
- private:
- /** \brief The registration method */
- virtual void
- computeRegistration ();
- };
-}
+private:
+ /** \brief The registration method */
+ virtual void
+ computeRegistration();
+};
+} // namespace pcl
#include <pcl/registration/impl/pairwise_graph_registration.hpp>
#pragma once
-#include <pcl/registration/registration.h>
#include <pcl/features/ppf.h>
+#include <pcl/registration/registration.h>
#include <unordered_map>
-namespace pcl
-{
- class PCL_EXPORTS PPFHashMapSearch
+namespace pcl {
+class PCL_EXPORTS PPFHashMapSearch {
+public:
+ /** \brief Data structure to hold the information for the key in the feature hash map
+ * of the PPFHashMapSearch class.
+ * \note The key is a nest of std::pair levels holding the four discretized feature
+ * values; the struct also provides the hash functor (operator()) that combines the
+ * std::hash of each component for use with std::unordered_multimap.
+ */
+ struct HashKeyStruct : public std::pair<int, std::pair<int, std::pair<int, int>>> {
+ HashKeyStruct() = default;
+
+ HashKeyStruct(int a, int b, int c, int d)
+ {
+ this->first = a;
+ this->second.first = b;
+ this->second.second.first = c;
+ this->second.second.second = d;
+ }
+
+ std::size_t
+ operator()(const HashKeyStruct& s) const noexcept
+ {
+ const std::size_t h1 = std::hash<int>{}(s.first);
+ const std::size_t h2 = std::hash<int>{}(s.second.first);
+ const std::size_t h3 = std::hash<int>{}(s.second.second.first);
+ const std::size_t h4 = std::hash<int>{}(s.second.second.second);
+ return h1 ^ (h2 << 1) ^ (h3 << 2) ^ (h4 << 3);
+ }
+ };
+ using FeatureHashMapType =
+ std::unordered_multimap<HashKeyStruct,
+ std::pair<std::size_t, std::size_t>,
+ HashKeyStruct>;
+ using FeatureHashMapTypePtr = shared_ptr<FeatureHashMapType>;
+ using Ptr = shared_ptr<PPFHashMapSearch>;
+ using ConstPtr = shared_ptr<const PPFHashMapSearch>;
+
+ /** \brief Constructor for the PPFHashMapSearch class which sets the two step
+ * parameters for the enclosed data structure.
+ * \param angle_discretization_step the step value between each bin of the hash map
+ * for the angular values
+ * \param distance_discretization_step the step value between each bin of the hash map
+ * for the distance values
+ */
+ PPFHashMapSearch(float angle_discretization_step = 12.0f / 180.0f *
+ static_cast<float>(M_PI),
+ float distance_discretization_step = 0.01f)
+ : feature_hash_map_(new FeatureHashMapType)
+ , internals_initialized_(false)
+ , angle_discretization_step_(angle_discretization_step)
+ , distance_discretization_step_(distance_discretization_step)
+ , max_dist_(-1.0f)
+ {}
+
+ /** \brief Method that sets the feature cloud to be inserted in the hash map
+ * \param feature_cloud a const smart pointer to the PPFSignature feature cloud
+ */
+ void
+ setInputFeatureCloud(PointCloud<PPFSignature>::ConstPtr feature_cloud);
+
+ /** \brief Function for finding the nearest neighbors for the given feature inside the
+ * discretized hash map.
+ * \param f1 The 1st value describing the query PPFSignature feature
+ * \param f2 The 2nd value describing the query PPFSignature feature
+ * \param f3 The 3rd value describing the query PPFSignature feature
+ * \param f4 The 4th value describing the query PPFSignature feature
+ * \param indices a vector of pair indices representing the feature pairs that have
+ * been found in the bin corresponding to the query feature
+ */
+ void
+ nearestNeighborSearch(float& f1,
+ float& f2,
+ float& f3,
+ float& f4,
+ std::vector<std::pair<std::size_t, std::size_t>>& indices);
+
+ /** \brief Convenience method for returning a copy of the class instance as a
+ * shared_ptr */
+ Ptr
+ makeShared()
+ {
+ return Ptr(new PPFHashMapSearch(*this));
+ }
+
+ /** \brief Returns the angle discretization step parameter (the step value between
+ * each bin of the hash map for the angular values) */
+ inline float
+ getAngleDiscretizationStep() const
{
- public:
- /** \brief Data structure to hold the information for the key in the feature hash map of the
- * PPFHashMapSearch class
- * \note It uses multiple pair levels in order to enable the usage of the boost::hash function
- * which has the std::pair implementation (i.e., does not require a custom hash function)
- */
- struct HashKeyStruct : public std::pair <int, std::pair <int, std::pair <int, int> > >
- {
- HashKeyStruct () = default;
-
- HashKeyStruct(int a, int b, int c, int d)
- {
- this->first = a;
- this->second.first = b;
- this->second.second.first = c;
- this->second.second.second = d;
- }
-
- std::size_t operator()(const HashKeyStruct& s) const noexcept
- {
- const std::size_t h1 = std::hash<int>{} (s.first);
- const std::size_t h2 = std::hash<int>{} (s.second.first);
- const std::size_t h3 = std::hash<int>{} (s.second.second.first);
- const std::size_t h4 = std::hash<int>{} (s.second.second.second);
- return h1 ^ (h2 << 1) ^ (h3 << 2) ^ (h4 << 3);
- }
- };
- using FeatureHashMapType = std::unordered_multimap<HashKeyStruct, std::pair<std::size_t, std::size_t>, HashKeyStruct>;
- using FeatureHashMapTypePtr = shared_ptr<FeatureHashMapType>;
- using Ptr = shared_ptr<PPFHashMapSearch>;
- using ConstPtr = shared_ptr<const PPFHashMapSearch>;
-
-
- /** \brief Constructor for the PPFHashMapSearch class which sets the two step parameters for the enclosed data structure
- * \param angle_discretization_step the step value between each bin of the hash map for the angular values
- * \param distance_discretization_step the step value between each bin of the hash map for the distance values
- */
- PPFHashMapSearch (float angle_discretization_step = 12.0f / 180.0f * static_cast<float> (M_PI),
- float distance_discretization_step = 0.01f)
- : feature_hash_map_ (new FeatureHashMapType)
- , internals_initialized_ (false)
- , angle_discretization_step_ (angle_discretization_step)
- , distance_discretization_step_ (distance_discretization_step)
- , max_dist_ (-1.0f)
- {
- }
-
- /** \brief Method that sets the feature cloud to be inserted in the hash map
- * \param feature_cloud a const smart pointer to the PPFSignature feature cloud
- */
- void
- setInputFeatureCloud (PointCloud<PPFSignature>::ConstPtr feature_cloud);
-
- /** \brief Function for finding the nearest neighbors for the given feature inside the discretized hash map
- * \param f1 The 1st value describing the query PPFSignature feature
- * \param f2 The 2nd value describing the query PPFSignature feature
- * \param f3 The 3rd value describing the query PPFSignature feature
- * \param f4 The 4th value describing the query PPFSignature feature
- * \param indices a vector of pair indices representing the feature pairs that have been found in the bin
- * corresponding to the query feature
- */
- void
- nearestNeighborSearch (float &f1, float &f2, float &f3, float &f4,
- std::vector<std::pair<std::size_t, std::size_t> > &indices);
-
- /** \brief Convenience method for returning a copy of the class instance as a shared_ptr */
- Ptr
- makeShared() { return Ptr (new PPFHashMapSearch (*this)); }
-
- /** \brief Returns the angle discretization step parameter (the step value between each bin of the hash map for the angular values) */
- inline float
- getAngleDiscretizationStep () const { return angle_discretization_step_; }
-
- /** \brief Returns the distance discretization step parameter (the step value between each bin of the hash map for the distance values) */
- inline float
- getDistanceDiscretizationStep () const { return distance_discretization_step_; }
-
- /** \brief Returns the maximum distance found between any feature pair in the given input feature cloud */
- inline float
- getModelDiameter () const { return max_dist_; }
-
- std::vector <std::vector <float> > alpha_m_;
- private:
- FeatureHashMapTypePtr feature_hash_map_;
- bool internals_initialized_;
-
- float angle_discretization_step_, distance_discretization_step_;
- float max_dist_;
+ return angle_discretization_step_;
+ }
+
+ /** \brief Returns the distance discretization step parameter (the step value between
+ * each bin of the hash map for the distance values) */
+ inline float
+ getDistanceDiscretizationStep() const
+ {
+ return distance_discretization_step_;
+ }
+
+ /** \brief Returns the maximum distance found between any feature pair in the given
+ * input feature cloud */
+ inline float
+ getModelDiameter() const
+ {
+ return max_dist_;
+ }
+
+ std::vector<std::vector<float>> alpha_m_;
+
+private:
+ FeatureHashMapTypePtr feature_hash_map_;
+ bool internals_initialized_;
+
+ float angle_discretization_step_, distance_discretization_step_;
+ float max_dist_;
+};
+
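As a hedged usage sketch (the helper name and parameter values are hypothetical; pcl::PPFEstimation comes from the <pcl/features/ppf.h> header included above), computing PPF features for a model cloud with normals and loading them into the hash map declared above might look like this:

#include <cmath>

#include <pcl/features/ppf.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/ppf_registration.h>

// Hypothetical helper: build the discretized PPF hash map for a model cloud that
// already carries normals (pcl::PointNormal).
pcl::PPFHashMapSearch::Ptr
buildModelHashMap(const pcl::PointCloud<pcl::PointNormal>::ConstPtr& model)
{
  pcl::PointCloud<pcl::PPFSignature>::Ptr model_features(
      new pcl::PointCloud<pcl::PPFSignature>());
  pcl::PPFEstimation<pcl::PointNormal, pcl::PointNormal, pcl::PPFSignature> estimator;
  estimator.setInputCloud(model);
  estimator.setInputNormals(model);
  estimator.compute(*model_features); // one PPF per ordered point pair

  // 12 degree angular bins and 1 cm distance bins, matching the constructor defaults
  // above; the distance step usually needs tuning to the model size.
  pcl::PPFHashMapSearch::Ptr searcher(
      new pcl::PPFHashMapSearch(12.0f / 180.0f * static_cast<float>(M_PI), 0.01f));
  searcher->setInputFeatureCloud(model_features);
  return searcher;
}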
+/** \brief Class that registers two point clouds based on their sets of PPFSignatures.
+ * Please refer to the following publication for more details:
+ * B. Drost, M. Ulrich, N. Navab, S. Ilic
+ * Model Globally, Match Locally: Efficient and Robust 3D Object Recognition
+ * 2010 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)
+ * 13-18 June 2010, San Francisco, CA
+ *
+ * \note This class works in tandem with the PPFEstimation class
+ *
+ * \author Alexandru-Eugen Ichim
+ */
+template <typename PointSource, typename PointTarget>
+class PPFRegistration : public Registration<PointSource, PointTarget> {
+public:
+ /** \brief Structure for storing a pose (represented as an Eigen::Affine3f) and an
+ * integer for counting votes.
+ * \note Initially used std::pair<Eigen::Affine3f, unsigned int>, but it proved
+ * problematic because of the Eigen structures alignment problems - std::pair does not
+ * have a custom allocator
+ */
+ struct PoseWithVotes {
+ PoseWithVotes(Eigen::Affine3f& a_pose, unsigned int& a_votes)
+ : pose(a_pose), votes(a_votes)
+ {}
+
+ Eigen::Affine3f pose;
+ unsigned int votes;
};
+ using PoseWithVotesList =
+ std::vector<PoseWithVotes, Eigen::aligned_allocator<PoseWithVotes>>;
+
+ /// input_ is the model cloud
+ using Registration<PointSource, PointTarget>::input_;
+ /// target_ is the scene cloud
+ using Registration<PointSource, PointTarget>::target_;
+ using Registration<PointSource, PointTarget>::converged_;
+ using Registration<PointSource, PointTarget>::final_transformation_;
+ using Registration<PointSource, PointTarget>::transformation_;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ /** \brief Empty constructor that initializes all the parameters of the algorithm with
+ * default values */
+ PPFRegistration()
+ : Registration<PointSource, PointTarget>()
+ , scene_reference_point_sampling_rate_(5)
+ , clustering_position_diff_threshold_(0.01f)
+ , clustering_rotation_diff_threshold_(20.0f / 180.0f * static_cast<float>(M_PI))
+ {}
+
+ /** \brief Method for setting the position difference clustering parameter
+ * \param clustering_position_diff_threshold distance threshold below which two poses
+ * are considered close enough to be in the same cluster (for the clustering phase of
+ * the algorithm)
+ */
+ inline void
+ setPositionClusteringThreshold(float clustering_position_diff_threshold)
+ {
+ clustering_position_diff_threshold_ = clustering_position_diff_threshold;
+ }
- /** \brief Class that registers two point clouds based on their sets of PPFSignatures.
- * Please refer to the following publication for more details:
- * B. Drost, M. Ulrich, N. Navab, S. Ilic
- * Model Globally, Match Locally: Efficient and Robust 3D Object Recognition
- * 2010 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)
- * 13-18 June 2010, San Francisco, CA
- *
- * \note This class works in tandem with the PPFEstimation class
- *
- * \author Alexandru-Eugen Ichim
+ /** \brief Returns the position difference clustering parameter, i.e. the distance
+ * threshold below which two poses are considered close enough to be in the same
+ * cluster (for the clustering phase of the algorithm)
+ */
- template <typename PointSource, typename PointTarget>
- class PPFRegistration : public Registration<PointSource, PointTarget>
+ inline float
+ getPositionClusteringThreshold()
{
- public:
- /** \brief Structure for storing a pose (represented as an Eigen::Affine3f) and an integer for counting votes
- * \note initially used std::pair<Eigen::Affine3f, unsigned int>, but it proved problematic
- * because of the Eigen structures alignment problems - std::pair does not have a custom allocator
- */
- struct PoseWithVotes
- {
- PoseWithVotes(Eigen::Affine3f &a_pose, unsigned int &a_votes)
- : pose (a_pose),
- votes (a_votes)
- {}
-
- Eigen::Affine3f pose;
- unsigned int votes;
- };
- using PoseWithVotesList = std::vector<PoseWithVotes, Eigen::aligned_allocator<PoseWithVotes> >;
-
- /// input_ is the model cloud
- using Registration<PointSource, PointTarget>::input_;
- /// target_ is the scene cloud
- using Registration<PointSource, PointTarget>::target_;
- using Registration<PointSource, PointTarget>::converged_;
- using Registration<PointSource, PointTarget>::final_transformation_;
- using Registration<PointSource, PointTarget>::transformation_;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
-
- /** \brief Empty constructor that initializes all the parameters of the algorithm with default values */
- PPFRegistration ()
- : Registration<PointSource, PointTarget> (),
- scene_reference_point_sampling_rate_ (5),
- clustering_position_diff_threshold_ (0.01f),
- clustering_rotation_diff_threshold_ (20.0f / 180.0f * static_cast<float> (M_PI))
- {}
-
- /** \brief Method for setting the position difference clustering parameter
- * \param clustering_position_diff_threshold distance threshold below which two poses are
- * considered close enough to be in the same cluster (for the clustering phase of the algorithm)
- */
- inline void
- setPositionClusteringThreshold (float clustering_position_diff_threshold) { clustering_position_diff_threshold_ = clustering_position_diff_threshold; }
-
- /** \brief Returns the parameter defining the position difference clustering parameter -
- * distance threshold below which two poses are considered close enough to be in the same cluster
- * (for the clustering phase of the algorithm)
- */
- inline float
- getPositionClusteringThreshold () { return clustering_position_diff_threshold_; }
-
- /** \brief Method for setting the rotation clustering parameter
- * \param clustering_rotation_diff_threshold rotation difference threshold below which two
- * poses are considered to be in the same cluster (for the clustering phase of the algorithm)
- */
- inline void
- setRotationClusteringThreshold (float clustering_rotation_diff_threshold) { clustering_rotation_diff_threshold_ = clustering_rotation_diff_threshold; }
-
- /** \brief Returns the parameter defining the rotation clustering threshold
- */
- inline float
- getRotationClusteringThreshold () { return clustering_rotation_diff_threshold_; }
-
- /** \brief Method for setting the scene reference point sampling rate
- * \param scene_reference_point_sampling_rate sampling rate for the scene reference point
- */
- inline void
- setSceneReferencePointSamplingRate (unsigned int scene_reference_point_sampling_rate) { scene_reference_point_sampling_rate_ = scene_reference_point_sampling_rate; }
-
- /** \brief Returns the parameter for the scene reference point sampling rate of the algorithm */
- inline unsigned int
- getSceneReferencePointSamplingRate () { return scene_reference_point_sampling_rate_; }
-
- /** \brief Function that sets the search method for the algorithm
- * \note Right now, the only available method is the one initially proposed by
- * the authors - by using a hash map with discretized feature vectors
- * \param search_method smart pointer to the search method to be set
- */
- inline void
- setSearchMethod (PPFHashMapSearch::Ptr search_method) { search_method_ = search_method; }
-
- /** \brief Getter function for the search method of the class */
- inline PPFHashMapSearch::Ptr
- getSearchMethod () { return search_method_; }
-
- /** \brief Provide a pointer to the input target (e.g., the point cloud that we want to align the input source to)
- * \param cloud the input point cloud target
- */
- void
- setInputTarget (const PointCloudTargetConstPtr &cloud) override;
-
-
- private:
- /** \brief Method that calculates the transformation between the input_ and target_ point clouds, based on the PPF features */
- void
- computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess) override;
-
-
- /** \brief the search method that is going to be used to find matching feature pairs */
- PPFHashMapSearch::Ptr search_method_;
-
- /** \brief parameter for the sampling rate of the scene reference points */
- unsigned int scene_reference_point_sampling_rate_;
-
- /** \brief position and rotation difference thresholds below which two
- * poses are considered to be in the same cluster (for the clustering phase of the algorithm) */
- float clustering_position_diff_threshold_, clustering_rotation_diff_threshold_;
-
- /** \brief use a kd-tree with range searches of range max_dist to skip an O(N) pass through the point cloud */
- typename pcl::KdTreeFLANN<PointTarget>::Ptr scene_search_tree_;
-
- /** \brief static method used for the std::sort function to order two PoseWithVotes
- * instances by their number of votes*/
- static bool
- poseWithVotesCompareFunction (const PoseWithVotes &a,
- const PoseWithVotes &b);
-
- /** \brief static method used for the std::sort function to order two pairs <index, votes>
- * by the number of votes (unsigned integer value) */
- static bool
- clusterVotesCompareFunction (const std::pair<std::size_t, unsigned int> &a,
- const std::pair<std::size_t, unsigned int> &b);
-
- /** \brief Method that clusters a set of given poses by using the clustering thresholds
- * and their corresponding number of votes (see publication for more details) */
- void
- clusterPoses (PoseWithVotesList &poses,
- PoseWithVotesList &result);
-
- /** \brief Method that checks whether two poses are close together - based on the clustering threshold parameters
- * of the class */
- bool
- posesWithinErrorBounds (Eigen::Affine3f &pose1,
- Eigen::Affine3f &pose2);
- };
-}
+ return clustering_position_diff_threshold_;
+ }
+
+ /** \brief Method for setting the rotation clustering parameter
+ * \param clustering_rotation_diff_threshold rotation difference threshold below which
+ * two poses are considered to be in the same cluster (for the clustering phase of the
+ * algorithm)
+ */
+ inline void
+ setRotationClusteringThreshold(float clustering_rotation_diff_threshold)
+ {
+ clustering_rotation_diff_threshold_ = clustering_rotation_diff_threshold;
+ }
+
+ /** \brief Returns the parameter defining the rotation clustering threshold
+ */
+ inline float
+ getRotationClusteringThreshold()
+ {
+ return clustering_rotation_diff_threshold_;
+ }
+
+ /** \brief Method for setting the scene reference point sampling rate
+ * \param scene_reference_point_sampling_rate sampling rate for the scene reference
+ * point
+ */
+ inline void
+ setSceneReferencePointSamplingRate(unsigned int scene_reference_point_sampling_rate)
+ {
+ scene_reference_point_sampling_rate_ = scene_reference_point_sampling_rate;
+ }
+
+ /** \brief Returns the parameter for the scene reference point sampling rate of the
+ * algorithm */
+ inline unsigned int
+ getSceneReferencePointSamplingRate()
+ {
+ return scene_reference_point_sampling_rate_;
+ }
+
+ /** \brief Function that sets the search method for the algorithm
+ * \note Right now, the only available method is the one initially proposed by
+ * the authors - by using a hash map with discretized feature vectors
+ * \param search_method smart pointer to the search method to be set
+ */
+ inline void
+ setSearchMethod(PPFHashMapSearch::Ptr search_method)
+ {
+ search_method_ = search_method;
+ }
+
+ /** \brief Getter function for the search method of the class */
+ inline PPFHashMapSearch::Ptr
+ getSearchMethod()
+ {
+ return search_method_;
+ }
+
+ /** \brief Provide a pointer to the input target (e.g., the point cloud that we want
+ * to align the input source to)
+ * \param cloud the input point cloud target
+ */
+ void
+ setInputTarget(const PointCloudTargetConstPtr& cloud) override;
+
+private:
+ /** \brief Method that calculates the transformation between the input_ and target_
+ * point clouds, based on the PPF features */
+ void
+ computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess) override;
+
+ /** \brief the search method that is going to be used to find matching feature pairs
+ */
+ PPFHashMapSearch::Ptr search_method_;
+
+ /** \brief parameter for the sampling rate of the scene reference points */
+ uindex_t scene_reference_point_sampling_rate_;
+
+ /** \brief position and rotation difference thresholds below which two
+ * poses are considered to be in the same cluster (for the clustering phase of the
+ * algorithm) */
+ float clustering_position_diff_threshold_, clustering_rotation_diff_threshold_;
+
+ /** \brief use a kd-tree with range searches of range max_dist to skip an O(N) pass
+ * through the point cloud */
+ typename pcl::KdTreeFLANN<PointTarget>::Ptr scene_search_tree_;
+
+ /** \brief static method used for the std::sort function to order two PoseWithVotes
+ * instances by their number of votes*/
+ static bool
+ poseWithVotesCompareFunction(const PoseWithVotes& a, const PoseWithVotes& b);
+
+ /** \brief static method used for the std::sort function to order two pairs <index,
+ * votes> by the number of votes (unsigned integer value) */
+ static bool
+ clusterVotesCompareFunction(const std::pair<std::size_t, unsigned int>& a,
+ const std::pair<std::size_t, unsigned int>& b);
+
+ /** \brief Method that clusters a set of given poses by using the clustering
+ * thresholds and their corresponding number of votes (see publication for more
+ * details) */
+ void
+ clusterPoses(PoseWithVotesList& poses, PoseWithVotesList& result);
+
+ /** \brief Method that checks whether two poses are close together - based on the
+ * clustering threshold parameters of the class */
+ bool
+ posesWithinErrorBounds(Eigen::Affine3f& pose1, Eigen::Affine3f& pose2);
+};
+} // namespace pcl
#include <pcl/registration/impl/ppf_registration.hpp>
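Continuing the sketch above, a hedged example of driving PPFRegistration itself; only the setters declared in this header and the pcl::Registration interface are assumed, and the threshold values are illustrative:

#include <cmath>

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/ppf_registration.h>

// Hypothetical helper: estimate the pose of `model` in `scene` using the hash map
// built from the model features (see the previous sketch).
Eigen::Matrix4f
registerWithPPF(const pcl::PointCloud<pcl::PointNormal>::ConstPtr& model,
                const pcl::PointCloud<pcl::PointNormal>::ConstPtr& scene,
                const pcl::PPFHashMapSearch::Ptr& model_search)
{
  pcl::PPFRegistration<pcl::PointNormal, pcl::PointNormal> ppf;
  ppf.setSearchMethod(model_search);
  ppf.setSceneReferencePointSamplingRate(10); // use every 10th scene point as reference
  ppf.setPositionClusteringThreshold(0.2f);   // metres
  ppf.setRotationClusteringThreshold(30.0f / 180.0f * static_cast<float>(M_PI));
  ppf.setInputSource(model); // input_ is the model cloud
  ppf.setInputTarget(scene); // target_ is the scene cloud

  pcl::PointCloud<pcl::PointNormal> model_in_scene;
  ppf.align(model_in_scene);
  return ppf.getFinalTransformation();
}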
#include <pcl/pcl_base.h>
#include <pcl/point_representation.h>
-namespace pcl
-{
- /**
- * \brief Class that compares two sets of features by using a multiscale representation of the features inside a
- * pyramid. Each level of the pyramid offers information about the similarity of the two feature sets.
- * \note Works with any Point/Feature type which has a PointRepresentation implementation
- * \note The only parameters it needs are the input dimension ranges and the output dimension ranges. The input
- * dimension ranges represent the ranges in which each dimension of the feature vector lies. As described in the
- * paper, a minimum inter-vector distance of sqrt(nr_dims)/2 is needed. As such, the target dimension range parameter
- * is used in order to augment/reduce the range for each dimension in order to obtain the necessary minimal
- * inter-vector distance and to add/subtract weight to/from certain dimensions of the feature vector.
- *
- * Follows the algorithm presented in the publication:
- * Grauman, K. & Darrell, T.
- * The Pyramid Match Kernel: Discriminative Classification with Sets of Image Features
- * Tenth IEEE International Conference on Computer Vision ICCV05 Volume 1
- * October 2005
- *
- * \author Alexandru-Eugen Ichim
+namespace pcl {
+/**
+ * \brief Class that compares two sets of features by using a multiscale representation
+ * of the features inside a pyramid. Each level of the pyramid offers information about
 * the similarity of the two feature sets.
 * \note Works with any Point/Feature type which has a PointRepresentation implementation
 * \note The only parameters it needs are the
+ * input dimension ranges and the output dimension ranges. The input dimension ranges
+ * represent the ranges in which each dimension of the feature vector lies. As described
+ * in the paper, a minimum inter-vector distance of sqrt(nr_dims)/2 is needed. As such,
+ * the target dimension range parameter is used in order to augment/reduce the range for
+ * each dimension in order to obtain the necessary minimal inter-vector distance and to
+ * add/subtract weight to/from certain dimensions of the feature vector.
+ *
+ * Follows the algorithm presented in the publication:
+ * Grauman, K. & Darrell, T.
 *     The Pyramid Match Kernel: Discriminative Classification with Sets of Image Features
 *     Tenth IEEE International Conference on Computer Vision ICCV05 Volume 1
+ * October 2005
+ *
+ * \author Alexandru-Eugen Ichim
+ */
+template <typename PointFeature>
+class PyramidFeatureHistogram : public PCLBase<PointFeature> {
+public:
+ using PCLBase<PointFeature>::input_;
+
+ using Ptr = shared_ptr<PyramidFeatureHistogram<PointFeature>>;
+ using ConstPtr = shared_ptr<const PyramidFeatureHistogram<PointFeature>>;
+ using PyramidFeatureHistogramPtr = Ptr;
+ using FeatureRepresentationConstPtr =
+ shared_ptr<const pcl::PointRepresentation<PointFeature>>;
+
+ /** \brief Empty constructor that instantiates the feature representation variable */
+ PyramidFeatureHistogram();
+
+ /** \brief Method for setting the input dimension range parameter.
+ * \note Please check the PyramidHistogram class description for more details about
+ * this parameter.
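+   *
+   * For example (an illustrative sketch for a PPF-like 4-dimensional feature with three
+   * angular dimensions and one normalized distance dimension; \c pyramid names an
+   * existing PyramidFeatureHistogram instance):
+   * \code
+   * std::vector<std::pair<float, float>> dim_range_input;
+   * for (std::size_t i = 0; i < 3; ++i)
+   *   dim_range_input.emplace_back (static_cast<float> (-M_PI), static_cast<float> (M_PI));
+   * dim_range_input.emplace_back (0.0f, 1.0f);
+   * pyramid->setInputDimensionRange (dim_range_input);
+   * \endcode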
+ */
+ inline void
+ setInputDimensionRange(std::vector<std::pair<float, float>>& dimension_range_input)
+ {
+ dimension_range_input_ = dimension_range_input;
+ }
+
+ /** \brief Method for retrieving the input dimension range vector */
+ inline std::vector<std::pair<float, float>>
+ getInputDimensionRange()
+ {
+ return dimension_range_input_;
+ }
+
+ /** \brief Method to set the target dimension range parameter.
+ * \note Please check the PyramidHistogram class description for more details about
+ * this parameter.
+ */
+ inline void
+ setTargetDimensionRange(std::vector<std::pair<float, float>>& dimension_range_target)
+ {
+ dimension_range_target_ = dimension_range_target;
+ }
+
+ /** \brief Method for retrieving the target dimension range vector */
+ inline std::vector<std::pair<float, float>>
+ getTargetDimensionRange()
+ {
+ return dimension_range_target_;
+ }
+
+ /** \brief Provide a pointer to the feature representation to use to convert features
+   * to k-D vectors. \param feature_representation the const shared pointer to a
+ * PointRepresentation
*/
- template <typename PointFeature>
- class PyramidFeatureHistogram : public PCLBase<PointFeature>
+ inline void
+ setPointRepresentation(const FeatureRepresentationConstPtr& feature_representation)
{
- public:
- using PCLBase<PointFeature>::input_;
-
- using Ptr = shared_ptr<PyramidFeatureHistogram<PointFeature> >;
- using ConstPtr = shared_ptr<const PyramidFeatureHistogram<PointFeature>>;
- using PyramidFeatureHistogramPtr = Ptr;
- using FeatureRepresentationConstPtr = shared_ptr<const pcl::PointRepresentation<PointFeature> >;
-
-
- /** \brief Empty constructor that instantiates the feature representation variable */
- PyramidFeatureHistogram ();
-
- /** \brief Method for setting the input dimension range parameter.
- * \note Please check the PyramidHistogram class description for more details about this parameter.
- */
- inline void
- setInputDimensionRange (std::vector<std::pair<float, float> > &dimension_range_input)
- { dimension_range_input_ = dimension_range_input; }
-
- /** \brief Method for retrieving the input dimension range vector */
- inline std::vector<std::pair<float, float> >
- getInputDimensionRange () { return dimension_range_input_; }
-
- /** \brief Method to set the target dimension range parameter.
- * \note Please check the PyramidHistogram class description for more details about this parameter.
- */
- inline void
- setTargetDimensionRange (std::vector<std::pair<float, float> > &dimension_range_target)
- { dimension_range_target_ = dimension_range_target; }
-
- /** \brief Method for retrieving the target dimension range vector */
- inline std::vector<std::pair<float, float> >
- getTargetDimensionRange () { return dimension_range_target_; }
-
- /** \brief Provide a pointer to the feature representation to use to convert features to k-D vectors.
- * \param feature_representation the const boost shared pointer to a PointRepresentation
- */
- inline void
- setPointRepresentation (const FeatureRepresentationConstPtr& feature_representation) { feature_representation_ = feature_representation; }
-
- /** \brief Get a pointer to the feature representation used when converting features into k-D vectors. */
- inline FeatureRepresentationConstPtr const
- getPointRepresentation () { return feature_representation_; }
-
- /** \brief The central method for inserting the feature set inside the pyramid and obtaining the complete pyramid */
- void
- compute ();
-
- /** \brief Checks whether the pyramid histogram has been computed */
- inline bool
- isComputed () { return is_computed_; }
-
- /** \brief Static method for comparing two pyramid histograms that returns a floating point value between 0 and 1,
- * representing the similarity between the feature sets on which the two pyramid histograms are based.
- * \param pyramid_a Pointer to the first pyramid to be compared (needs to be computed already).
- * \param pyramid_b Pointer to the second pyramid to be compared (needs to be computed already).
- */
- static float
- comparePyramidFeatureHistograms (const PyramidFeatureHistogramPtr &pyramid_a,
- const PyramidFeatureHistogramPtr &pyramid_b);
-
-
- private:
- std::size_t nr_dimensions, nr_levels, nr_features;
- std::vector<std::pair<float, float> > dimension_range_input_, dimension_range_target_;
- FeatureRepresentationConstPtr feature_representation_;
- bool is_computed_;
-
- /** \brief Checks for input inconsistencies and initializes the underlying data structures */
- bool
- initializeHistogram ();
-
- /** \brief Converts a feature in templated form to an STL vector. This is the point where the conversion from the
- * input dimension range to the target dimension range is done.
- */
- void
- convertFeatureToVector (const PointFeature &feature,
- std::vector<float> &feature_vector);
-
- /** \brief Adds a feature vector to its corresponding bin at each level in the pyramid */
- void
- addFeature (std::vector<float> &feature);
-
- /** \brief Access the pyramid bin given the position of the bin at the given pyramid level
- * and the pyramid level
- * \param access index of the bin at the respective level
- * \param level the level in the pyramid
- */
- inline unsigned int&
- at (std::vector<std::size_t> &access,
- std::size_t &level);
-
- /** \brief Access the pyramid bin given a feature vector and the pyramid level
- * \param feature the feature in vectorized form
- * \param level the level in the pyramid
- */
- inline unsigned int&
- at (std::vector<float> &feature,
- std::size_t &level);
-
- /** \brief Structure for representing a single pyramid histogram level */
- struct PyramidFeatureHistogramLevel
- {
- PyramidFeatureHistogramLevel ()
- {
- }
-
- PyramidFeatureHistogramLevel (std::vector<std::size_t> &a_bins_per_dimension, std::vector<float> &a_bin_step) :
- bins_per_dimension (a_bins_per_dimension),
- bin_step (a_bin_step)
- {
- initializeHistogramLevel ();
- }
-
- void
- initializeHistogramLevel ();
-
- std::vector<unsigned int> hist;
- std::vector<std::size_t> bins_per_dimension;
- std::vector<float> bin_step;
- };
- std::vector<PyramidFeatureHistogramLevel> hist_levels;
+ feature_representation_ = feature_representation;
+ }
+
+ /** \brief Get a pointer to the feature representation used when converting features
+ * into k-D vectors. */
+ inline FeatureRepresentationConstPtr const
+ getPointRepresentation()
+ {
+ return feature_representation_;
+ }
+
+ /** \brief The central method for inserting the feature set inside the pyramid and
+ * obtaining the complete pyramid */
+ void
+ compute();
+
+ /** \brief Checks whether the pyramid histogram has been computed */
+ inline bool
+ isComputed()
+ {
+ return is_computed_;
+ }
+
+ /** \brief Static method for comparing two pyramid histograms that returns a floating
+ * point value between 0 and 1, representing the similarity between the feature sets
+ * on which the two pyramid histograms are based. \param pyramid_a Pointer to the
+ * first pyramid to be compared (needs to be computed already). \param pyramid_b
+ * Pointer to the second pyramid to be compared (needs to be computed already).
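+   *
+   * A minimal sketch (illustrative; \c features_a and \c features_b are assumed to be
+   * pcl::PointCloud<pcl::PPFSignature>::Ptr feature clouds, and the dimension range
+   * vectors are assumed to have been set up beforehand):
+   * \code
+   * PyramidFeatureHistogram<pcl::PPFSignature>::Ptr pyramid_a (new PyramidFeatureHistogram<pcl::PPFSignature>);
+   * pyramid_a->setInputCloud (features_a);
+   * pyramid_a->setInputDimensionRange (dim_range_input);
+   * pyramid_a->setTargetDimensionRange (dim_range_target);
+   * pyramid_a->compute ();
+   * // ... build pyramid_b from features_b in the same way ...
+   * float similarity =
+   *   PyramidFeatureHistogram<pcl::PPFSignature>::comparePyramidFeatureHistograms (pyramid_a, pyramid_b);
+   * \endcode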
+ */
+ static float
+ comparePyramidFeatureHistograms(const PyramidFeatureHistogramPtr& pyramid_a,
+ const PyramidFeatureHistogramPtr& pyramid_b);
+
+private:
+ std::size_t nr_dimensions, nr_levels, nr_features;
+ std::vector<std::pair<float, float>> dimension_range_input_, dimension_range_target_;
+ FeatureRepresentationConstPtr feature_representation_;
+ bool is_computed_;
+
+ /** \brief Checks for input inconsistencies and initializes the underlying data
+ * structures */
+ bool
+ initializeHistogram();
+
+ /** \brief Converts a feature in templated form to an STL vector. This is the point
+ * where the conversion from the input dimension range to the target dimension range
+ * is done.
+ */
+ void
+ convertFeatureToVector(const PointFeature& feature,
+ std::vector<float>& feature_vector);
+
+ /** \brief Adds a feature vector to its corresponding bin at each level in the pyramid
+ */
+ void
+ addFeature(std::vector<float>& feature);
+
+ /** \brief Access the pyramid bin given the position of the bin at the given pyramid
+ * level and the pyramid level \param access index of the bin at the respective level
+ * \param level the level in the pyramid
+ */
+ inline unsigned int&
+ at(std::vector<std::size_t>& access, std::size_t& level);
+
+ /** \brief Access the pyramid bin given a feature vector and the pyramid level
+ * \param feature the feature in vectorized form
+ * \param level the level in the pyramid
+ */
+ inline unsigned int&
+ at(std::vector<float>& feature, std::size_t& level);
+
+ /** \brief Structure for representing a single pyramid histogram level */
+ struct PyramidFeatureHistogramLevel {
+ PyramidFeatureHistogramLevel() {}
+
+ PyramidFeatureHistogramLevel(std::vector<std::size_t>& a_bins_per_dimension,
+ std::vector<float>& a_bin_step)
+ : bins_per_dimension(a_bins_per_dimension), bin_step(a_bin_step)
+ {
+ initializeHistogramLevel();
+ }
+
+ void
+ initializeHistogramLevel();
+
+ std::vector<unsigned int> hist;
+ std::vector<std::size_t> bins_per_dimension;
+ std::vector<float> bin_step;
};
-}
+ std::vector<PyramidFeatureHistogramLevel> hist_levels;
+};
+} // namespace pcl
#ifdef PCL_NO_PRECOMPILE
#include <pcl/registration/impl/pyramid_feature_matching.hpp>
#pragma once
// PCL includes
-#include <pcl/pcl_base.h>
-#include <pcl/common/transforms.h>
-#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
-#include <pcl/search/kdtree.h>
-#include <pcl/registration/boost.h>
-#include <pcl/registration/transformation_estimation.h>
#include <pcl/registration/correspondence_estimation.h>
#include <pcl/registration/correspondence_rejection.h>
+#include <pcl/registration/transformation_estimation.h>
+#include <pcl/search/kdtree.h>
+#include <pcl/memory.h>
+#include <pcl/pcl_base.h>
+#include <pcl/pcl_macros.h>
+
+namespace pcl {
+/** \brief @b Registration represents the base registration class for general purpose,
 * ICP-like methods.
 * \author Radu B. Rusu, Michael Dixon
 * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class Registration : public PCLBase<PointSource> {
+public:
+ using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
+
+ // using PCLBase<PointSource>::initCompute;
+ using PCLBase<PointSource>::deinitCompute;
+ using PCLBase<PointSource>::input_;
+ using PCLBase<PointSource>::indices_;
+
+ using Ptr = shared_ptr<Registration<PointSource, PointTarget, Scalar>>;
+ using ConstPtr = shared_ptr<const Registration<PointSource, PointTarget, Scalar>>;
+
+ using CorrespondenceRejectorPtr = pcl::registration::CorrespondenceRejector::Ptr;
+ using KdTree = pcl::search::KdTree<PointTarget>;
+ using KdTreePtr = typename KdTree::Ptr;
+
+ using KdTreeReciprocal = pcl::search::KdTree<PointSource>;
+ using KdTreeReciprocalPtr = typename KdTreeReciprocal::Ptr;
+
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+
+ using PointRepresentationConstPtr = typename KdTree::PointRepresentationConstPtr;
+
+ using TransformationEstimation = typename pcl::registration::
+ TransformationEstimation<PointSource, PointTarget, Scalar>;
+ using TransformationEstimationPtr = typename TransformationEstimation::Ptr;
+ using TransformationEstimationConstPtr = typename TransformationEstimation::ConstPtr;
+
+ using CorrespondenceEstimation =
+ pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>;
+ using CorrespondenceEstimationPtr = typename CorrespondenceEstimation::Ptr;
+ using CorrespondenceEstimationConstPtr = typename CorrespondenceEstimation::ConstPtr;
+
+ /** \brief The callback signature to the function updating intermediate source point
+   * cloud position during its registration to the target point cloud. \param[in]
+ * cloud_src - the point cloud which will be updated to match target \param[in]
+ * indices_src - a selector of points in cloud_src \param[in] cloud_tgt - the point
+ * cloud we want to register against \param[in] indices_tgt - a selector of points in
+ * cloud_tgt
+ */
+ using UpdateVisualizerCallbackSignature = void(const pcl::PointCloud<PointSource>&,
+ const pcl::Indices&,
+ const pcl::PointCloud<PointTarget>&,
+ const pcl::Indices&);
+
+ /** \brief Empty constructor. */
+ Registration()
+ : tree_(new KdTree)
+ , tree_reciprocal_(new KdTreeReciprocal)
+ , nr_iterations_(0)
+ , max_iterations_(10)
+ , ransac_iterations_(0)
+ , target_()
+ , final_transformation_(Matrix4::Identity())
+ , transformation_(Matrix4::Identity())
+ , previous_transformation_(Matrix4::Identity())
+ , transformation_epsilon_(0.0)
+ , transformation_rotation_epsilon_(0.0)
+ , euclidean_fitness_epsilon_(-std::numeric_limits<double>::max())
+ , corr_dist_threshold_(std::sqrt(std::numeric_limits<double>::max()))
+ , inlier_threshold_(0.05)
+ , converged_(false)
+ , min_number_correspondences_(3)
+ , correspondences_(new Correspondences)
+ , transformation_estimation_()
+ , correspondence_estimation_()
+ , target_cloud_updated_(true)
+ , source_cloud_updated_(true)
+ , force_no_recompute_(false)
+ , force_no_recompute_reciprocal_(false)
+ , point_representation_()
+ {}
+
+ /** \brief destructor. */
+ ~Registration() {}
+
+ /** \brief Provide a pointer to the transformation estimation object.
+ * (e.g., SVD, point to plane etc.)
+ *
+ * \param[in] te is the pointer to the corresponding transformation estimation object
+ *
+ * Code example:
+ *
+ * \code
+ * TransformationEstimationPointToPlaneLLS<PointXYZ, PointXYZ>::Ptr trans_lls
+ * (new TransformationEstimationPointToPlaneLLS<PointXYZ, PointXYZ>);
+ * icp.setTransformationEstimation (trans_lls);
+ * // or...
+ * TransformationEstimationSVD<PointXYZ, PointXYZ>::Ptr trans_svd
+ * (new TransformationEstimationSVD<PointXYZ, PointXYZ>);
+ * icp.setTransformationEstimation (trans_svd);
+ * \endcode
+ */
+ void
+ setTransformationEstimation(const TransformationEstimationPtr& te)
+ {
+ transformation_estimation_ = te;
+ }
+
+ /** \brief Provide a pointer to the correspondence estimation object.
+ * (e.g., regular, reciprocal, normal shooting etc.)
+ *
+ * \param[in] ce is the pointer to the corresponding correspondence estimation object
+ *
+ * Code example:
+ *
+ * \code
+ * CorrespondenceEstimation<PointXYZ, PointXYZ>::Ptr
+ * ce (new CorrespondenceEstimation<PointXYZ, PointXYZ>);
+ * ce->setInputSource (source);
+ * ce->setInputTarget (target);
+ * icp.setCorrespondenceEstimation (ce);
+ * // or...
+ * CorrespondenceEstimationNormalShooting<PointNormal, PointNormal, PointNormal>::Ptr
+ * cens (new CorrespondenceEstimationNormalShooting<PointNormal, PointNormal>);
+ * ce->setInputSource (source);
+ * ce->setInputTarget (target);
+ * ce->setSourceNormals (source);
+ * ce->setTargetNormals (target);
+ * icp.setCorrespondenceEstimation (cens);
+ * \endcode
+ */
+ void
+ setCorrespondenceEstimation(const CorrespondenceEstimationPtr& ce)
+ {
+ correspondence_estimation_ = ce;
+ }
+
+ /** \brief Provide a pointer to the input source
+ * (e.g., the point cloud that we want to align to the target)
+ *
+ * \param[in] cloud the input point cloud source
+ */
+ virtual void
+ setInputSource(const PointCloudSourceConstPtr& cloud);
+
+  /** \brief Get a pointer to the input point cloud dataset source. */
+ inline PointCloudSourceConstPtr const
+ getInputSource()
+ {
+ return (input_);
+ }
+
+ /** \brief Provide a pointer to the input target (e.g., the point cloud that we want
+ * to align the input source to) \param[in] cloud the input point cloud target
+ */
+ virtual inline void
+ setInputTarget(const PointCloudTargetConstPtr& cloud);
+
+ /** \brief Get a pointer to the input point cloud dataset target. */
+ inline PointCloudTargetConstPtr const
+ getInputTarget()
+ {
+ return (target_);
+ }
+
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the target cloud.
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputTarget. Only use if you are
+ * confident that the tree will be set correctly.
+ */
+ inline void
+ setSearchMethodTarget(const KdTreePtr& tree, bool force_no_recompute = false)
+ {
+ tree_ = tree;
+ force_no_recompute_ = force_no_recompute;
+ // Since we just set a new tree, we need to check for updates
+ target_cloud_updated_ = true;
+ }
+
+ /** \brief Get a pointer to the search method used to find correspondences in the
+ * target cloud. */
+ inline KdTreePtr
+ getSearchMethodTarget() const
+ {
+ return (tree_);
+ }
+
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the source cloud (usually used by reciprocal correspondence finding).
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputSource. Only use if you are
+ * extremely confident that the tree will be set correctly.
+ */
+ inline void
+ setSearchMethodSource(const KdTreeReciprocalPtr& tree,
+ bool force_no_recompute = false)
+ {
+ tree_reciprocal_ = tree;
+ force_no_recompute_reciprocal_ = force_no_recompute;
+ // Since we just set a new tree, we need to check for updates
+ source_cloud_updated_ = true;
+ }
+
+ /** \brief Get a pointer to the search method used to find correspondences in the
+ * source cloud. */
+ inline KdTreeReciprocalPtr
+ getSearchMethodSource() const
+ {
+ return (tree_reciprocal_);
+ }
-namespace pcl
-{
- /** \brief @b Registration represents the base registration class for general purpose, ICP-like methods.
- * \author Radu B. Rusu, Michael Dixon
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class Registration : public PCLBase<PointSource>
- {
- public:
- using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
-
- // using PCLBase<PointSource>::initCompute;
- using PCLBase<PointSource>::deinitCompute;
- using PCLBase<PointSource>::input_;
- using PCLBase<PointSource>::indices_;
-
- using Ptr = shared_ptr< Registration<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr< const Registration<PointSource, PointTarget, Scalar> >;
-
- using CorrespondenceRejectorPtr = pcl::registration::CorrespondenceRejector::Ptr;
- using KdTree = pcl::search::KdTree<PointTarget>;
- using KdTreePtr = typename KdTree::Ptr;
-
- using KdTreeReciprocal = pcl::search::KdTree<PointSource>;
- using KdTreeReciprocalPtr = typename KdTreeReciprocal::Ptr;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
-
- using PointRepresentationConstPtr = typename KdTree::PointRepresentationConstPtr;
-
- using TransformationEstimation = typename pcl::registration::TransformationEstimation<PointSource, PointTarget, Scalar>;
- using TransformationEstimationPtr = typename TransformationEstimation::Ptr;
- using TransformationEstimationConstPtr = typename TransformationEstimation::ConstPtr;
-
- using CorrespondenceEstimation = pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>;
- using CorrespondenceEstimationPtr = typename CorrespondenceEstimation::Ptr;
- using CorrespondenceEstimationConstPtr = typename CorrespondenceEstimation::ConstPtr;
-
- /** \brief The callback signature to the function updating intermediate source point cloud position
- * during it's registration to the target point cloud.
- * \param[in] cloud_src - the point cloud which will be updated to match target
- * \param[in] indices_src - a selector of points in cloud_src
- * \param[in] cloud_tgt - the point cloud we want to register against
- * \param[in] indices_tgt - a selector of points in cloud_tgt
- */
- using UpdateVisualizerCallbackSignature = void (const pcl::PointCloud<PointSource>&,
- const std::vector<int>&,
- const pcl::PointCloud<PointTarget>&,
- const std::vector<int>&);
-
- /** \brief Empty constructor. */
- Registration ()
- : tree_ (new KdTree)
- , tree_reciprocal_ (new KdTreeReciprocal)
- , nr_iterations_ (0)
- , max_iterations_ (10)
- , ransac_iterations_ (0)
- , target_ ()
- , final_transformation_ (Matrix4::Identity ())
- , transformation_ (Matrix4::Identity ())
- , previous_transformation_ (Matrix4::Identity ())
- , transformation_epsilon_ (0.0)
- , transformation_rotation_epsilon_(0.0)
- , euclidean_fitness_epsilon_ (-std::numeric_limits<double>::max ())
- , corr_dist_threshold_ (std::sqrt (std::numeric_limits<double>::max ()))
- , inlier_threshold_ (0.05)
- , converged_ (false)
- , min_number_correspondences_ (3)
- , correspondences_ (new Correspondences)
- , transformation_estimation_ ()
- , correspondence_estimation_ ()
- , target_cloud_updated_ (true)
- , source_cloud_updated_ (true)
- , force_no_recompute_ (false)
- , force_no_recompute_reciprocal_ (false)
- , point_representation_ ()
- {
- }
-
- /** \brief destructor. */
- ~Registration () {}
-
- /** \brief Provide a pointer to the transformation estimation object.
- * (e.g., SVD, point to plane etc.)
- *
- * \param[in] te is the pointer to the corresponding transformation estimation object
- *
- * Code example:
- *
- * \code
- * TransformationEstimationPointToPlaneLLS<PointXYZ, PointXYZ>::Ptr trans_lls (new TransformationEstimationPointToPlaneLLS<PointXYZ, PointXYZ>);
- * icp.setTransformationEstimation (trans_lls);
- * // or...
- * TransformationEstimationSVD<PointXYZ, PointXYZ>::Ptr trans_svd (new TransformationEstimationSVD<PointXYZ, PointXYZ>);
- * icp.setTransformationEstimation (trans_svd);
- * \endcode
- */
- void
- setTransformationEstimation (const TransformationEstimationPtr &te) { transformation_estimation_ = te; }
-
- /** \brief Provide a pointer to the correspondence estimation object.
- * (e.g., regular, reciprocal, normal shooting etc.)
- *
- * \param[in] ce is the pointer to the corresponding correspondence estimation object
- *
- * Code example:
- *
- * \code
- * CorrespondenceEstimation<PointXYZ, PointXYZ>::Ptr ce (new CorrespondenceEstimation<PointXYZ, PointXYZ>);
- * ce->setInputSource (source);
- * ce->setInputTarget (target);
- * icp.setCorrespondenceEstimation (ce);
- * // or...
- * CorrespondenceEstimationNormalShooting<PointNormal, PointNormal, PointNormal>::Ptr cens (new CorrespondenceEstimationNormalShooting<PointNormal, PointNormal>);
- * ce->setInputSource (source);
- * ce->setInputTarget (target);
- * ce->setSourceNormals (source);
- * ce->setTargetNormals (target);
- * icp.setCorrespondenceEstimation (cens);
- * \endcode
- */
- void
- setCorrespondenceEstimation (const CorrespondenceEstimationPtr &ce) { correspondence_estimation_ = ce; }
-
- /** \brief Provide a pointer to the input source
- * (e.g., the point cloud that we want to align to the target)
- *
- * \param[in] cloud the input point cloud source
- */
- virtual void
- setInputSource (const PointCloudSourceConstPtr &cloud)
- {
- source_cloud_updated_ = true;
- PCLBase<PointSource>::setInputCloud (cloud);
- }
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- inline PointCloudSourceConstPtr const
- getInputSource () { return (input_ ); }
-
- /** \brief Provide a pointer to the input target (e.g., the point cloud that we want to align the input source to)
- * \param[in] cloud the input point cloud target
- */
- virtual inline void
- setInputTarget (const PointCloudTargetConstPtr &cloud);
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- inline PointCloudTargetConstPtr const
- getInputTarget () { return (target_ ); }
-
-
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the target cloud.
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputTarget. Only use if you are
- * confident that the tree will be set correctly.
- */
- inline void
- setSearchMethodTarget (const KdTreePtr &tree,
- bool force_no_recompute = false)
- {
- tree_ = tree;
- if (force_no_recompute)
- {
- force_no_recompute_ = true;
- }
- // Since we just set a new tree, we need to check for updates
- target_cloud_updated_ = true;
- }
-
- /** \brief Get a pointer to the search method used to find correspondences in the
- * target cloud. */
- inline KdTreePtr
- getSearchMethodTarget () const
- {
- return (tree_);
- }
-
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the source cloud (usually used by reciprocal correspondence finding).
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputSource. Only use if you are
- * extremely confident that the tree will be set correctly.
- */
- inline void
- setSearchMethodSource (const KdTreeReciprocalPtr &tree,
- bool force_no_recompute = false)
- {
- tree_reciprocal_ = tree;
- if ( force_no_recompute )
- {
- force_no_recompute_reciprocal_ = true;
- }
- // Since we just set a new tree, we need to check for updates
- source_cloud_updated_ = true;
- }
-
- /** \brief Get a pointer to the search method used to find correspondences in the
- * source cloud. */
- inline KdTreeReciprocalPtr
- getSearchMethodSource () const
- {
- return (tree_reciprocal_);
- }
-
- /** \brief Get the final transformation matrix estimated by the registration method. */
- inline Matrix4
- getFinalTransformation () { return (final_transformation_); }
-
- /** \brief Get the last incremental transformation matrix estimated by the registration method. */
- inline Matrix4
- getLastIncrementalTransformation () { return (transformation_); }
-
- /** \brief Set the maximum number of iterations the internal optimization should run for.
- * \param[in] nr_iterations the maximum number of iterations the internal optimization should run for
- */
- inline void
- setMaximumIterations (int nr_iterations) { max_iterations_ = nr_iterations; }
-
- /** \brief Get the maximum number of iterations the internal optimization should run for, as set by the user. */
- inline int
- getMaximumIterations () { return (max_iterations_); }
-
- /** \brief Set the number of iterations RANSAC should run for.
- * \param[in] ransac_iterations is the number of iterations RANSAC should run for
- */
- inline void
- setRANSACIterations (int ransac_iterations) { ransac_iterations_ = ransac_iterations; }
-
- /** \brief Get the number of iterations RANSAC should run for, as set by the user. */
- inline double
- getRANSACIterations () { return (ransac_iterations_); }
-
- /** \brief Set the inlier distance threshold for the internal RANSAC outlier rejection loop.
- *
- * The method considers a point to be an inlier, if the distance between the target data index and the transformed
- * source index is smaller than the given inlier distance threshold.
- * The value is set by default to 0.05m.
- * \param[in] inlier_threshold the inlier distance threshold for the internal RANSAC outlier rejection loop
- */
- inline void
- setRANSACOutlierRejectionThreshold (double inlier_threshold) { inlier_threshold_ = inlier_threshold; }
-
- /** \brief Get the inlier distance threshold for the internal outlier rejection loop as set by the user. */
- inline double
- getRANSACOutlierRejectionThreshold () { return (inlier_threshold_); }
-
- /** \brief Set the maximum distance threshold between two correspondent points in source <-> target. If the
- * distance is larger than this threshold, the points will be ignored in the alignment process.
- * \param[in] distance_threshold the maximum distance threshold between a point and its nearest neighbor
- * correspondent in order to be considered in the alignment process
- */
- inline void
- setMaxCorrespondenceDistance (double distance_threshold) { corr_dist_threshold_ = distance_threshold; }
-
- /** \brief Get the maximum distance threshold between two correspondent points in source <-> target. If the
- * distance is larger than this threshold, the points will be ignored in the alignment process.
- */
- inline double
- getMaxCorrespondenceDistance () { return (corr_dist_threshold_); }
-
- /** \brief Set the transformation epsilon (maximum allowable translation squared difference between two consecutive
- * transformations) in order for an optimization to be considered as having converged to the final
- * solution.
- * \param[in] epsilon the transformation epsilon in order for an optimization to be considered as having
- * converged to the final solution.
- */
- inline void
- setTransformationEpsilon (double epsilon) { transformation_epsilon_ = epsilon; }
-
- /** \brief Get the transformation epsilon (maximum allowable translation squared difference between two consecutive
- * transformations) as set by the user.
- */
- inline double
- getTransformationEpsilon () { return (transformation_epsilon_); }
-
- /** \brief Set the transformation rotation epsilon (maximum allowable rotation difference between two consecutive
- * transformations) in order for an optimization to be considered as having converged to the final
- * solution.
- * \param[in] epsilon the transformation rotation epsilon in order for an optimization to be considered as having
- * converged to the final solution (epsilon is the cos(angle) in a axis-angle representation).
- */
- inline void
- setTransformationRotationEpsilon (double epsilon) { transformation_rotation_epsilon_ = epsilon; }
-
- /** \brief Get the transformation rotation epsilon (maximum allowable difference between two consecutive
- * transformations) as set by the user (epsilon is the cos(angle) in a axis-angle representation).
- */
- inline double
- getTransformationRotationEpsilon () { return (transformation_rotation_epsilon_); }
-
- /** \brief Set the maximum allowed Euclidean error between two consecutive steps in the ICP loop, before
- * the algorithm is considered to have converged.
- * The error is estimated as the sum of the differences between correspondences in an Euclidean sense,
- * divided by the number of correspondences.
- * \param[in] epsilon the maximum allowed distance error before the algorithm will be considered to have
- * converged
- */
- inline void
- setEuclideanFitnessEpsilon (double epsilon) { euclidean_fitness_epsilon_ = epsilon; }
-
- /** \brief Get the maximum allowed distance error before the algorithm will be considered to have converged,
- * as set by the user. See \ref setEuclideanFitnessEpsilon
- */
- inline double
- getEuclideanFitnessEpsilon () { return (euclidean_fitness_epsilon_); }
-
- /** \brief Provide a boost shared pointer to the PointRepresentation to be used when comparing points
- * \param[in] point_representation the PointRepresentation to be used by the k-D tree
- */
- inline void
- setPointRepresentation (const PointRepresentationConstPtr &point_representation)
- {
- point_representation_ = point_representation;
- }
-
- /** \brief Register the user callback function which will be called from registration thread
- * in order to update point cloud obtained after each iteration
- * \param[in] visualizerCallback reference of the user callback function
- */
- inline bool
- registerVisualizationCallback (std::function<UpdateVisualizerCallbackSignature> &visualizerCallback)
- {
- if (visualizerCallback)
- {
- update_visualizer_ = visualizerCallback;
- return (true);
- }
- return (false);
- }
-
- /** \brief Obtain the Euclidean fitness score (e.g., mean of squared distances from the source to the target)
- * \param[in] max_range maximum allowable distance between a point and its correspondence in the target
- * (default: double::max)
- */
- inline double
- getFitnessScore (double max_range = std::numeric_limits<double>::max ());
-
- /** \brief Obtain the Euclidean fitness score (e.g., mean of squared distances from the source to the target)
- * from two sets of correspondence distances (distances between source and target points)
- * \param[in] distances_a the first set of distances between correspondences
- * \param[in] distances_b the second set of distances between correspondences
- */
- inline double
- getFitnessScore (const std::vector<float> &distances_a, const std::vector<float> &distances_b);
-
- /** \brief Return the state of convergence after the last align run */
- inline bool
- hasConverged () const { return (converged_); }
-
- /** \brief Call the registration algorithm which estimates the transformation and returns the transformed source
- * (input) as \a output.
- * \param[out] output the resultant input transformed point cloud dataset
- */
- inline void
- align (PointCloudSource &output);
-
- /** \brief Call the registration algorithm which estimates the transformation and returns the transformed source
- * (input) as \a output.
- * \param[out] output the resultant input transformed point cloud dataset
- * \param[in] guess the initial gross estimation of the transformation
- */
- inline void
- align (PointCloudSource &output, const Matrix4& guess);
-
- /** \brief Abstract class get name method. */
- inline const std::string&
- getClassName () const { return (reg_name_); }
-
- /** \brief Internal computation initialization. */
- bool
- initCompute ();
-
- /** \brief Internal computation when reciprocal lookup is needed */
- bool
- initComputeReciprocal ();
-
- /** \brief Add a new correspondence rejector to the list
- * \param[in] rejector the new correspondence rejector to concatenate
- *
- * Code example:
- *
- * \code
- * CorrespondenceRejectorDistance rej;
- * rej.setInputCloud<PointXYZ> (keypoints_src);
- * rej.setInputTarget<PointXYZ> (keypoints_tgt);
- * rej.setMaximumDistance (1);
- * rej.setInputCorrespondences (all_correspondences);
- *
- * // or...
- *
- * \endcode
- */
- inline void
- addCorrespondenceRejector (const CorrespondenceRejectorPtr &rejector)
- {
- correspondence_rejectors_.push_back (rejector);
- }
-
- /** \brief Get the list of correspondence rejectors. */
- inline std::vector<CorrespondenceRejectorPtr>
- getCorrespondenceRejectors ()
- {
- return (correspondence_rejectors_);
- }
-
- /** \brief Remove the i-th correspondence rejector in the list
- * \param[in] i the position of the correspondence rejector in the list to remove
- */
- inline bool
- removeCorrespondenceRejector (unsigned int i)
- {
- if (i >= correspondence_rejectors_.size ())
- return (false);
- correspondence_rejectors_.erase (correspondence_rejectors_.begin () + i);
- return (true);
- }
-
- /** \brief Clear the list of correspondence rejectors. */
- inline void
- clearCorrespondenceRejectors ()
- {
- correspondence_rejectors_.clear ();
- }
-
- protected:
- /** \brief The registration method name. */
- std::string reg_name_;
-
- /** \brief A pointer to the spatial search object. */
- KdTreePtr tree_;
-
- /** \brief A pointer to the spatial search object of the source. */
- KdTreeReciprocalPtr tree_reciprocal_;
-
- /** \brief The number of iterations the internal optimization ran for (used internally). */
- int nr_iterations_;
-
- /** \brief The maximum number of iterations the internal optimization should run for.
- * The default value is 10.
- */
- int max_iterations_;
-
- /** \brief The number of iterations RANSAC should run for. */
- int ransac_iterations_;
-
- /** \brief The input point cloud dataset target. */
- PointCloudTargetConstPtr target_;
-
- /** \brief The final transformation matrix estimated by the registration method after N iterations. */
- Matrix4 final_transformation_;
-
- /** \brief The transformation matrix estimated by the registration method. */
- Matrix4 transformation_;
-
- /** \brief The previous transformation matrix estimated by the registration method (used internally). */
- Matrix4 previous_transformation_;
-
- /** \brief The maximum difference between two consecutive transformations in order to consider convergence
- * (user defined).
- */
- double transformation_epsilon_;
-
- /** \brief The maximum rotation difference between two consecutive transformations in order to consider convergence
- * (user defined).
- */
- double transformation_rotation_epsilon_;
-
- /** \brief The maximum allowed Euclidean error between two consecutive steps in the ICP loop, before the
- * algorithm is considered to have converged. The error is estimated as the sum of the differences between
- * correspondences in an Euclidean sense, divided by the number of correspondences.
- */
- double euclidean_fitness_epsilon_;
-
- /** \brief The maximum distance threshold between two correspondent points in source <-> target. If the
- * distance is larger than this threshold, the points will be ignored in the alignment process.
- */
- double corr_dist_threshold_;
-
- /** \brief The inlier distance threshold for the internal RANSAC outlier rejection loop.
- * The method considers a point to be an inlier, if the distance between the target data index and the transformed
- * source index is smaller than the given inlier distance threshold. The default value is 0.05.
- */
- double inlier_threshold_;
-
- /** \brief Holds internal convergence state, given user parameters. */
- bool converged_;
-
- /** \brief The minimum number of correspondences that the algorithm needs before attempting to estimate the
- * transformation. The default value is 3.
- */
- int min_number_correspondences_;
-
- /** \brief The set of correspondences determined at this ICP step. */
- CorrespondencesPtr correspondences_;
-
- /** \brief A TransformationEstimation object, used to calculate the 4x4 rigid transformation. */
- TransformationEstimationPtr transformation_estimation_;
-
- /** \brief A CorrespondenceEstimation object, used to estimate correspondences between the source and the target cloud. */
- CorrespondenceEstimationPtr correspondence_estimation_;
-
- /** \brief The list of correspondence rejectors to use. */
- std::vector<CorrespondenceRejectorPtr> correspondence_rejectors_;
-
- /** \brief Variable that stores whether we have a new target cloud, meaning we need to pre-process it again.
- * This way, we avoid rebuilding the kd-tree for the target cloud every time the determineCorrespondences () method
- * is called. */
- bool target_cloud_updated_;
- /** \brief Variable that stores whether we have a new source cloud, meaning we need to pre-process it again.
- * This way, we avoid rebuilding the reciprocal kd-tree for the source cloud every time the determineCorrespondences () method
- * is called. */
- bool source_cloud_updated_;
- /** \brief A flag which, if set, means the tree operating on the target cloud
- * will never be recomputed*/
- bool force_no_recompute_;
-
- /** \brief A flag which, if set, means the tree operating on the source cloud
- * will never be recomputed*/
- bool force_no_recompute_reciprocal_;
-
- /** \brief Callback function to update intermediate source point cloud position during it's registration
- * to the target point cloud.
- */
- std::function<UpdateVisualizerCallbackSignature> update_visualizer_;
-
- /** \brief Search for the closest nearest neighbor of a given point.
- * \param cloud the point cloud dataset to use for nearest neighbor search
- * \param index the index of the query point
- * \param indices the resultant vector of indices representing the k-nearest neighbors
- * \param distances the resultant distances from the query point to the k-nearest neighbors
- */
- inline bool
- searchForNeighbors (const PointCloudSource &cloud, int index,
- std::vector<int> &indices, std::vector<float> &distances)
- {
- int k = tree_->nearestKSearch (cloud, index, 1, indices, distances);
- if (k == 0)
- return (false);
- return (true);
- }
-
- /** \brief Abstract transformation computation method with initial guess */
- virtual void
- computeTransformation (PointCloudSource &output, const Matrix4& guess) = 0;
-
- private:
- /** \brief The point representation used (internal). */
- PointRepresentationConstPtr point_representation_;
-
- /**
- * \brief Remove from public API in favor of \ref setInputSource
- *
- * Still gives the correct result (with a warning)
- */
- void setInputCloud (const PointCloudSourceConstPtr &cloud) override
- {
- PCL_WARN ("[pcl::registration::Registration] setInputCloud is deprecated."
- "Please use setInputSource instead.\n");
- setInputSource (cloud);
- }
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
-}
+ /** \brief Get the final transformation matrix estimated by the registration method.
+ */
+ inline Matrix4
+ getFinalTransformation()
+ {
+ return (final_transformation_);
+ }
+
+ /** \brief Get the last incremental transformation matrix estimated by the
+ * registration method. */
+ inline Matrix4
+ getLastIncrementalTransformation()
+ {
+ return (transformation_);
+ }
+
+ /** \brief Set the maximum number of iterations the internal optimization should run
+ * for. \param[in] nr_iterations the maximum number of iterations the internal
+ * optimization should run for
+ */
+ inline void
+ setMaximumIterations(int nr_iterations)
+ {
+ max_iterations_ = nr_iterations;
+ }
+
+ /** \brief Get the maximum number of iterations the internal optimization should run
+ * for, as set by the user. */
+ inline int
+ getMaximumIterations()
+ {
+ return (max_iterations_);
+ }
+
+ /** \brief Set the number of iterations RANSAC should run for.
+ * \param[in] ransac_iterations is the number of iterations RANSAC should run for
+ */
+ inline void
+ setRANSACIterations(int ransac_iterations)
+ {
+ ransac_iterations_ = ransac_iterations;
+ }
+
+ /** \brief Get the number of iterations RANSAC should run for, as set by the user. */
+ inline double
+ getRANSACIterations()
+ {
+ return (ransac_iterations_);
+ }
+
+ /** \brief Set the inlier distance threshold for the internal RANSAC outlier rejection
+ * loop.
+ *
+   * The method considers a point to be an inlier if the distance between the
+   * corresponding target point and the transformed source point is smaller than the given inlier
+ * distance threshold. The value is set by default to 0.05m. \param[in]
+ * inlier_threshold the inlier distance threshold for the internal RANSAC outlier
+ * rejection loop
+ */
+ inline void
+ setRANSACOutlierRejectionThreshold(double inlier_threshold)
+ {
+ inlier_threshold_ = inlier_threshold;
+ }
+
+ /** \brief Get the inlier distance threshold for the internal outlier rejection loop
+ * as set by the user. */
+ inline double
+ getRANSACOutlierRejectionThreshold()
+ {
+ return (inlier_threshold_);
+ }
+
+ /** \brief Set the maximum distance threshold between two correspondent points in
+ * source <-> target. If the distance is larger than this threshold, the points will
+ * be ignored in the alignment process. \param[in] distance_threshold the maximum
+ * distance threshold between a point and its nearest neighbor correspondent in order
+ * to be considered in the alignment process
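+   *
+   * For example (illustrative; \c reg names any Registration-derived object, and the
+   * threshold is in the same units as the cloud, here metres):
+   * \code
+   * reg.setMaxCorrespondenceDistance (0.05); // ignore correspondences farther than 5 cm
+   * \endcode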
+ */
+ inline void
+ setMaxCorrespondenceDistance(double distance_threshold)
+ {
+ corr_dist_threshold_ = distance_threshold;
+ }
+
+ /** \brief Get the maximum distance threshold between two correspondent points in
+ * source <-> target. If the distance is larger than this threshold, the points will
+ * be ignored in the alignment process.
+ */
+ inline double
+ getMaxCorrespondenceDistance()
+ {
+ return (corr_dist_threshold_);
+ }
+
+ /** \brief Set the transformation epsilon (maximum allowable translation squared
+ * difference between two consecutive transformations) in order for an optimization to
+ * be considered as having converged to the final solution. \param[in] epsilon the
+ * transformation epsilon in order for an optimization to be considered as having
+ * converged to the final solution.
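+   *
+   * For example (an illustrative sketch; \c reg names any Registration-derived object,
+   * e.g. an IterativeClosestPoint instance):
+   * \code
+   * reg.setTransformationEpsilon (1e-8);   // squared translation difference
+   * reg.setEuclideanFitnessEpsilon (1e-5); // mean squared correspondence error
+   * reg.setMaximumIterations (50);
+   * \endcode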
+ */
+ inline void
+ setTransformationEpsilon(double epsilon)
+ {
+ transformation_epsilon_ = epsilon;
+ }
+
+ /** \brief Get the transformation epsilon (maximum allowable translation squared
+ * difference between two consecutive transformations) as set by the user.
+ */
+ inline double
+ getTransformationEpsilon()
+ {
+ return (transformation_epsilon_);
+ }
+
+ /** \brief Set the transformation rotation epsilon (maximum allowable rotation
+ * difference between two consecutive transformations) in order for an optimization to
+ * be considered as having converged to the final solution. \param[in] epsilon the
+ * transformation rotation epsilon in order for an optimization to be considered as
+   * having converged to the final solution (epsilon is the cos(angle) in an axis-angle
+ * representation).
+ */
+ inline void
+ setTransformationRotationEpsilon(double epsilon)
+ {
+ transformation_rotation_epsilon_ = epsilon;
+ }
+
+ /** \brief Get the transformation rotation epsilon (maximum allowable difference
+ * between two consecutive transformations) as set by the user (epsilon is the
+   * cos(angle) in an axis-angle representation).
+ */
+ inline double
+ getTransformationRotationEpsilon()
+ {
+ return (transformation_rotation_epsilon_);
+ }
+
+ /** \brief Set the maximum allowed Euclidean error between two consecutive steps in
+ * the ICP loop, before the algorithm is considered to have converged. The error is
+   * estimated as the sum of the differences between correspondences in a Euclidean
+ * sense, divided by the number of correspondences. \param[in] epsilon the maximum
+ * allowed distance error before the algorithm will be considered to have converged
+ */
+ inline void
+ setEuclideanFitnessEpsilon(double epsilon)
+ {
+ euclidean_fitness_epsilon_ = epsilon;
+ }
+
+ /** \brief Get the maximum allowed distance error before the algorithm will be
+ * considered to have converged, as set by the user. See \ref
+ * setEuclideanFitnessEpsilon
+ */
+ inline double
+ getEuclideanFitnessEpsilon()
+ {
+ return (euclidean_fitness_epsilon_);
+ }
+
+  /** \brief Provide a shared pointer to the PointRepresentation to be used when
+ * comparing points \param[in] point_representation the PointRepresentation to be used
+ * by the k-D tree
+ */
+ inline void
+ setPointRepresentation(const PointRepresentationConstPtr& point_representation)
+ {
+ point_representation_ = point_representation;
+ }
+
+  /** \brief Register the user callback function which will be called from the
+   * registration thread in order to update the point cloud obtained after each
+   * iteration \param[in] visualizerCallback reference to the user callback function
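+   *
+   * A minimal sketch (illustrative; \c reg names a Registration-derived object working
+   * on pcl::PointXYZ clouds):
+   * \code
+   * std::function<void (const pcl::PointCloud<pcl::PointXYZ>&, const pcl::Indices&,
+   *                     const pcl::PointCloud<pcl::PointXYZ>&, const pcl::Indices&)>
+   *   callback = [] (const pcl::PointCloud<pcl::PointXYZ>& cloud_src, const pcl::Indices&,
+   *                  const pcl::PointCloud<pcl::PointXYZ>&, const pcl::Indices&)
+   *   { std::cout << "intermediate cloud has " << cloud_src.size () << " points\n"; };
+   * reg.registerVisualizationCallback (callback);
+   * \endcode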
+ */
+ inline bool
+ registerVisualizationCallback(
+ std::function<UpdateVisualizerCallbackSignature>& visualizerCallback)
+ {
+ if (visualizerCallback) {
+ update_visualizer_ = visualizerCallback;
+ return (true);
+ }
+ return (false);
+ }
+
+ /** \brief Obtain the Euclidean fitness score (e.g., mean of squared distances from
+ * the source to the target) \param[in] max_range maximum allowable distance between a
+ * point and its correspondence in the target (default: double::max)
+ */
+ inline double
+ getFitnessScore(double max_range = std::numeric_limits<double>::max());
+
+ /** \brief Obtain the Euclidean fitness score (e.g., mean of squared distances from
+ * the source to the target) from two sets of correspondence distances (distances
+ * between source and target points) \param[in] distances_a the first set of distances
+ * between correspondences \param[in] distances_b the second set of distances between
+ * correspondences
+ */
+ inline double
+ getFitnessScore(const std::vector<float>& distances_a,
+ const std::vector<float>& distances_b);
+
+ /** \brief Return the state of convergence after the last align run */
+ inline bool
+ hasConverged() const
+ {
+ return (converged_);
+ }
+
+ /** \brief Call the registration algorithm which estimates the transformation and
+ * returns the transformed source (input) as \a output. \param[out] output the
+ * resultant input transformed point cloud dataset
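+   *
+   * A typical usage sketch (illustrative; uses the derived pcl::IterativeClosestPoint
+   * and assumes existing \c source and \c target point cloud pointers):
+   * \code
+   * pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
+   * icp.setInputSource (source);
+   * icp.setInputTarget (target);
+   * pcl::PointCloud<pcl::PointXYZ> aligned;
+   * icp.align (aligned);
+   * if (icp.hasConverged ())
+   *   std::cout << icp.getFinalTransformation () << std::endl;
+   * \endcode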
+ */
+ inline void
+ align(PointCloudSource& output);
+
+ /** \brief Call the registration algorithm which estimates the transformation and
+ * returns the transformed source (input) as \a output. \param[out] output the
+ * resultant input transformed point cloud dataset \param[in] guess the initial gross
+ * estimation of the transformation
+ */
+ inline void
+ align(PointCloudSource& output, const Matrix4& guess);
+
+ /** \brief Abstract class get name method. */
+ inline const std::string&
+ getClassName() const
+ {
+ return (reg_name_);
+ }
+
+ /** \brief Internal computation initialization. */
+ bool
+ initCompute();
+
+ /** \brief Internal computation when reciprocal lookup is needed */
+ bool
+ initComputeReciprocal();
+
+ /** \brief Add a new correspondence rejector to the list
+ * \param[in] rejector the new correspondence rejector to concatenate
+ *
+ * Code example:
+ *
+ * \code
+ * CorrespondenceRejectorDistance rej;
+ * rej.setInputCloud<PointXYZ> (keypoints_src);
+ * rej.setInputTarget<PointXYZ> (keypoints_tgt);
+ * rej.setMaximumDistance (1);
+ * rej.setInputCorrespondences (all_correspondences);
+ *
+ * // or...
+ *
+ * \endcode
+ */
+ inline void
+ addCorrespondenceRejector(const CorrespondenceRejectorPtr& rejector)
+ {
+ correspondence_rejectors_.push_back(rejector);
+ }
+
+ /** \brief Get the list of correspondence rejectors. */
+ inline std::vector<CorrespondenceRejectorPtr>
+ getCorrespondenceRejectors()
+ {
+ return (correspondence_rejectors_);
+ }
+
+ /** \brief Remove the i-th correspondence rejector in the list
+ * \param[in] i the position of the correspondence rejector in the list to remove
+ */
+ inline bool
+ removeCorrespondenceRejector(unsigned int i)
+ {
+ if (i >= correspondence_rejectors_.size())
+ return (false);
+ correspondence_rejectors_.erase(correspondence_rejectors_.begin() + i);
+ return (true);
+ }
+
+ /** \brief Clear the list of correspondence rejectors. */
+ inline void
+ clearCorrespondenceRejectors()
+ {
+ correspondence_rejectors_.clear();
+ }
+
+protected:
+ /** \brief The registration method name. */
+ std::string reg_name_;
+
+ /** \brief A pointer to the spatial search object. */
+ KdTreePtr tree_;
+
+ /** \brief A pointer to the spatial search object of the source. */
+ KdTreeReciprocalPtr tree_reciprocal_;
+
+ /** \brief The number of iterations the internal optimization ran for (used
+ * internally). */
+ int nr_iterations_;
+
+ /** \brief The maximum number of iterations the internal optimization should run for.
+ * The default value is 10.
+ */
+ int max_iterations_;
+
+ /** \brief The number of iterations RANSAC should run for. */
+ int ransac_iterations_;
+
+ /** \brief The input point cloud dataset target. */
+ PointCloudTargetConstPtr target_;
+
+ /** \brief The final transformation matrix estimated by the registration method after
+ * N iterations. */
+ Matrix4 final_transformation_;
+
+ /** \brief The transformation matrix estimated by the registration method. */
+ Matrix4 transformation_;
+
+ /** \brief The previous transformation matrix estimated by the registration method
+ * (used internally). */
+ Matrix4 previous_transformation_;
+
+ /** \brief The maximum difference between two consecutive transformations in order to
+ * consider convergence (user defined).
+ */
+ double transformation_epsilon_;
+
+ /** \brief The maximum rotation difference between two consecutive transformations in
+ * order to consider convergence (user defined).
+ */
+ double transformation_rotation_epsilon_;
+
+ /** \brief The maximum allowed Euclidean error between two consecutive steps in the
+ * ICP loop, before the algorithm is considered to have converged. The error is
+   * estimated as the sum of the differences between correspondences in a Euclidean
+ * sense, divided by the number of correspondences.
+ */
+ double euclidean_fitness_epsilon_;
+
+ /** \brief The maximum distance threshold between two correspondent points in source
+ * <-> target. If the distance is larger than this threshold, the points will be
+ * ignored in the alignment process.
+ */
+ double corr_dist_threshold_;
+
+ /** \brief The inlier distance threshold for the internal RANSAC outlier rejection
+   * loop. The method considers a point to be an inlier if the distance between the
+   * corresponding target point and the transformed source point is smaller than the given inlier
+ * distance threshold. The default value is 0.05.
+ */
+ double inlier_threshold_;
+
+ /** \brief Holds internal convergence state, given user parameters. */
+ bool converged_;
+
+ /** \brief The minimum number of correspondences that the algorithm needs before
+ * attempting to estimate the transformation. The default value is 3.
+ */
+ int min_number_correspondences_;
+
+ /** \brief The set of correspondences determined at this ICP step. */
+ CorrespondencesPtr correspondences_;
+
+ /** \brief A TransformationEstimation object, used to calculate the 4x4 rigid
+ * transformation. */
+ TransformationEstimationPtr transformation_estimation_;
+
+ /** \brief A CorrespondenceEstimation object, used to estimate correspondences between
+ * the source and the target cloud. */
+ CorrespondenceEstimationPtr correspondence_estimation_;
+
+ /** \brief The list of correspondence rejectors to use. */
+ std::vector<CorrespondenceRejectorPtr> correspondence_rejectors_;
+
+ /** \brief Variable that stores whether we have a new target cloud, meaning we need to
+ * pre-process it again. This way, we avoid rebuilding the kd-tree for the target
+ * cloud every time the determineCorrespondences () method is called. */
+ bool target_cloud_updated_;
+ /** \brief Variable that stores whether we have a new source cloud, meaning we need to
+ * pre-process it again. This way, we avoid rebuilding the reciprocal kd-tree for the
+ * source cloud every time the determineCorrespondences () method is called. */
+ bool source_cloud_updated_;
+ /** \brief A flag which, if set, means the tree operating on the target cloud
+ * will never be recomputed*/
+ bool force_no_recompute_;
+
+ /** \brief A flag which, if set, means the tree operating on the source cloud
+ * will never be recomputed*/
+ bool force_no_recompute_reciprocal_;
+
+ /** \brief Callback function to update intermediate source point cloud position during
+   * its registration to the target point cloud.
+ */
+ std::function<UpdateVisualizerCallbackSignature> update_visualizer_;
+
+  /** \brief Search for the nearest neighbor of a given point.
+   * \param cloud the point cloud dataset to use for nearest neighbor search
+   * \param index the index of the query point
+   * \param indices the resultant index of the nearest neighbor (vector of size 1)
+   * \param distances the resultant distance from the query point to the nearest
+   * neighbor (vector of size 1)
+   */
+ inline bool
+ searchForNeighbors(const PointCloudSource& cloud,
+ int index,
+ pcl::Indices& indices,
+ std::vector<float>& distances)
+ {
+ int k = tree_->nearestKSearch(cloud, index, 1, indices, distances);
+ if (k == 0)
+ return (false);
+ return (true);
+ }
+
+ /** \brief Abstract transformation computation method with initial guess */
+ virtual void
+ computeTransformation(PointCloudSource& output, const Matrix4& guess) = 0;
+
+private:
+ /** \brief The point representation used (internal). */
+ PointRepresentationConstPtr point_representation_;
+
+  /**
+   * \brief Removed from the public API in favor of \ref setInputSource
+   *
+   * Still gives the correct result (with a deprecation warning)
+   */
+ void
+ setInputCloud(const PointCloudSourceConstPtr& cloud) override
+ {
+    PCL_WARN("[pcl::registration::Registration] setInputCloud is deprecated. "
+             "Please use setInputSource instead.\n");
+ setInputSource(cloud);
+ }
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace pcl
#include <pcl/registration/impl/registration.hpp>
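The protected members documented above map one-to-one onto public setters of the Registration API. A minimal usage sketch follows; it is illustrative only, assuming a concrete subclass such as pcl::IterativeClosestPoint and standard PCL point types, and the function and cloud names are placeholders.

```cpp
// Illustrative sketch: configuring the members documented above through the public
// setters of a concrete Registration subclass. Names and values are examples only.
#include <iostream>

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>

void
align_clouds(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
             const pcl::PointCloud<pcl::PointXYZ>::Ptr& target)
{
  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
  icp.setInputSource(source);
  icp.setInputTarget(target);
  icp.setMaximumIterations(50);           // max_iterations_
  icp.setTransformationEpsilon(1e-8);     // transformation_epsilon_
  icp.setEuclideanFitnessEpsilon(1e-6);   // euclidean_fitness_epsilon_
  icp.setMaxCorrespondenceDistance(0.05); // corr_dist_threshold_
  pcl::PointCloud<pcl::PointXYZ> aligned;
  icp.align(aligned); // fills final_transformation_ and converged_
  if (icp.hasConverged())
    std::cout << icp.getFinalTransformation() << std::endl;
}
```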
#pragma once
+#include <pcl/registration/correspondence_rejection_poly.h>
#include <pcl/registration/registration.h>
#include <pcl/registration/transformation_estimation_svd.h>
#include <pcl/registration/transformation_validation.h>
-#include <pcl/registration/correspondence_rejection_poly.h>
-namespace pcl
-{
- /** \brief Pose estimation and alignment class using a prerejective RANSAC routine.
- *
- * This class inserts a simple, yet effective "prerejection" step into the standard
- * RANSAC pose estimation loop in order to avoid verification of pose hypotheses
- * that are likely to be wrong. This is achieved by local pose-invariant geometric
- * constraints, as also implemented in the class
- * \ref registration::CorrespondenceRejectorPoly "CorrespondenceRejectorPoly".
- *
- * In order to robustly align partial/occluded models, this routine performs
- * fit error evaluation using only inliers, i.e. points closer than a
- * Euclidean threshold, which is specifiable using \ref setInlierFraction().
- *
- * The amount of prerejection or "greedyness" of the algorithm can be specified
- * using \ref setSimilarityThreshold() in [0,1[, where a value of 0 means disabled,
- * and 1 is maximally rejective.
- *
- * If you use this in academic work, please cite:
- *
- * A. G. Buch, D. Kraft, J.-K. Kämäräinen, H. G. Petersen and N. Krüger.
- * Pose Estimation using Local Structure-Specific Shape and Appearance Context.
- * International Conference on Robotics and Automation (ICRA), 2013.
- *
- * \author Anders Glent Buch (andersgb1@gmail.com)
- * \ingroup registration
+namespace pcl {
+/** \brief Pose estimation and alignment class using a prerejective RANSAC routine.
+ *
+ * This class inserts a simple, yet effective "prerejection" step into the standard
+ * RANSAC pose estimation loop in order to avoid verification of pose hypotheses
+ * that are likely to be wrong. This is achieved by local pose-invariant geometric
+ * constraints, as also implemented in the class
+ * \ref registration::CorrespondenceRejectorPoly "CorrespondenceRejectorPoly".
+ *
+ * In order to robustly align partial/occluded models, this routine performs
+ * fit error evaluation using only inliers, i.e. points closer than a
+ * Euclidean threshold, which is specifiable using \ref setInlierFraction().
+ *
+ * The amount of prerejection or "greediness" of the algorithm can be specified
+ * using \ref setSimilarityThreshold() in [0,1[, where a value of 0 means disabled,
+ * and 1 is maximally rejective.
+ *
+ * If you use this in academic work, please cite:
+ *
+ * A. G. Buch, D. Kraft, J.-K. Kämäräinen, H. G. Petersen and N. Krüger.
+ * Pose Estimation using Local Structure-Specific Shape and Appearance Context.
+ * International Conference on Robotics and Automation (ICRA), 2013.
+ *
+ * \author Anders Glent Buch (andersgb1@gmail.com)
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename FeatureT>
+class SampleConsensusPrerejective : public Registration<PointSource, PointTarget> {
+public:
+ using Matrix4 = typename Registration<PointSource, PointTarget>::Matrix4;
+
+ using Registration<PointSource, PointTarget>::reg_name_;
+ using Registration<PointSource, PointTarget>::getClassName;
+ using Registration<PointSource, PointTarget>::input_;
+ using Registration<PointSource, PointTarget>::target_;
+ using Registration<PointSource, PointTarget>::tree_;
+ using Registration<PointSource, PointTarget>::max_iterations_;
+ using Registration<PointSource, PointTarget>::corr_dist_threshold_;
+ using Registration<PointSource, PointTarget>::transformation_;
+ using Registration<PointSource, PointTarget>::final_transformation_;
+ using Registration<PointSource, PointTarget>::transformation_estimation_;
+ using Registration<PointSource, PointTarget>::getFitnessScore;
+ using Registration<PointSource, PointTarget>::converged_;
+
+ using PointCloudSource =
+ typename Registration<PointSource, PointTarget>::PointCloudSource;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget =
+ typename Registration<PointSource, PointTarget>::PointCloudTarget;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+ using FeatureCloud = pcl::PointCloud<FeatureT>;
+ using FeatureCloudPtr = typename FeatureCloud::Ptr;
+ using FeatureCloudConstPtr = typename FeatureCloud::ConstPtr;
+
+ using Ptr =
+ shared_ptr<SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>>;
+ using ConstPtr =
+ shared_ptr<const SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>>;
+
+ using FeatureKdTreePtr = typename KdTreeFLANN<FeatureT>::Ptr;
+
+ using CorrespondenceRejectorPoly =
+ pcl::registration::CorrespondenceRejectorPoly<PointSource, PointTarget>;
+ using CorrespondenceRejectorPolyPtr = typename CorrespondenceRejectorPoly::Ptr;
+ using CorrespondenceRejectorPolyConstPtr =
+ typename CorrespondenceRejectorPoly::ConstPtr;
+
+ /** \brief Constructor */
+ SampleConsensusPrerejective()
+ : input_features_()
+ , target_features_()
+ , nr_samples_(3)
+ , k_correspondences_(2)
+ , feature_tree_(new pcl::KdTreeFLANN<FeatureT>)
+ , correspondence_rejector_poly_(new CorrespondenceRejectorPoly)
+ , inlier_fraction_(0.0f)
+ {
+ reg_name_ = "SampleConsensusPrerejective";
+ correspondence_rejector_poly_->setSimilarityThreshold(0.6f);
+ max_iterations_ = 5000;
+ transformation_estimation_.reset(
+ new pcl::registration::TransformationEstimationSVD<PointSource, PointTarget>);
+ };
+
+ /** \brief Destructor */
+ ~SampleConsensusPrerejective() {}
+
+  /** \brief Provide a shared pointer to the source point cloud's feature descriptors
+   * \param features the source point cloud's features
+   */
+ void
+ setSourceFeatures(const FeatureCloudConstPtr& features);
+
+ /** \brief Get a pointer to the source point cloud's features */
+ inline const FeatureCloudConstPtr
+ getSourceFeatures() const
+ {
+ return (input_features_);
+ }
+
+  /** \brief Provide a shared pointer to the target point cloud's feature descriptors
+   * \param features the target point cloud's features
+   */
+ void
+ setTargetFeatures(const FeatureCloudConstPtr& features);
+
+ /** \brief Get a pointer to the target point cloud's features */
+ inline const FeatureCloudConstPtr
+ getTargetFeatures() const
+ {
+ return (target_features_);
+ }
+
+ /** \brief Set the number of samples to use during each iteration
+ * \param nr_samples the number of samples to use during each iteration
+ */
+ inline void
+ setNumberOfSamples(int nr_samples)
+ {
+ nr_samples_ = nr_samples;
+ }
+
+ /** \brief Get the number of samples to use during each iteration, as set by the user
*/
- template <typename PointSource, typename PointTarget, typename FeatureT>
- class SampleConsensusPrerejective : public Registration<PointSource, PointTarget>
+ inline int
+ getNumberOfSamples() const
{
- public:
- using Matrix4 = typename Registration<PointSource, PointTarget>::Matrix4;
-
- using Registration<PointSource, PointTarget>::reg_name_;
- using Registration<PointSource, PointTarget>::getClassName;
- using Registration<PointSource, PointTarget>::input_;
- using Registration<PointSource, PointTarget>::target_;
- using Registration<PointSource, PointTarget>::tree_;
- using Registration<PointSource, PointTarget>::max_iterations_;
- using Registration<PointSource, PointTarget>::corr_dist_threshold_;
- using Registration<PointSource, PointTarget>::transformation_;
- using Registration<PointSource, PointTarget>::final_transformation_;
- using Registration<PointSource, PointTarget>::transformation_estimation_;
- using Registration<PointSource, PointTarget>::getFitnessScore;
- using Registration<PointSource, PointTarget>::converged_;
-
- using PointCloudSource = typename Registration<PointSource, PointTarget>::PointCloudSource;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = typename Registration<PointSource, PointTarget>::PointCloudTarget;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- using FeatureCloud = pcl::PointCloud<FeatureT>;
- using FeatureCloudPtr = typename FeatureCloud::Ptr;
- using FeatureCloudConstPtr = typename FeatureCloud::ConstPtr;
-
- using Ptr = shared_ptr<SampleConsensusPrerejective<PointSource, PointTarget, FeatureT> >;
- using ConstPtr = shared_ptr<const SampleConsensusPrerejective<PointSource, PointTarget, FeatureT> >;
-
- using FeatureKdTreePtr = typename KdTreeFLANN<FeatureT>::Ptr;
-
- using CorrespondenceRejectorPoly = pcl::registration::CorrespondenceRejectorPoly<PointSource, PointTarget>;
- using CorrespondenceRejectorPolyPtr = typename CorrespondenceRejectorPoly::Ptr;
- using CorrespondenceRejectorPolyConstPtr = typename CorrespondenceRejectorPoly::ConstPtr;
-
- /** \brief Constructor */
- SampleConsensusPrerejective ()
- : input_features_ ()
- , target_features_ ()
- , nr_samples_(3)
- , k_correspondences_ (2)
- , feature_tree_ (new pcl::KdTreeFLANN<FeatureT>)
- , correspondence_rejector_poly_ (new CorrespondenceRejectorPoly)
- , inlier_fraction_ (0.0f)
- {
- reg_name_ = "SampleConsensusPrerejective";
- correspondence_rejector_poly_->setSimilarityThreshold (0.6f);
- max_iterations_ = 5000;
- transformation_estimation_.reset (new pcl::registration::TransformationEstimationSVD<PointSource, PointTarget>);
- };
-
- /** \brief Destructor */
- ~SampleConsensusPrerejective ()
- {
- }
-
- /** \brief Provide a boost shared pointer to the source point cloud's feature descriptors
- * \param features the source point cloud's features
- */
- void
- setSourceFeatures (const FeatureCloudConstPtr &features);
-
- /** \brief Get a pointer to the source point cloud's features */
- inline const FeatureCloudConstPtr
- getSourceFeatures () const
- {
- return (input_features_);
- }
-
- /** \brief Provide a boost shared pointer to the target point cloud's feature descriptors
- * \param features the target point cloud's features
- */
- void
- setTargetFeatures (const FeatureCloudConstPtr &features);
-
- /** \brief Get a pointer to the target point cloud's features */
- inline const FeatureCloudConstPtr
- getTargetFeatures () const
- {
- return (target_features_);
- }
-
- /** \brief Set the number of samples to use during each iteration
- * \param nr_samples the number of samples to use during each iteration
- */
- inline void
- setNumberOfSamples (int nr_samples)
- {
- nr_samples_ = nr_samples;
- }
-
- /** \brief Get the number of samples to use during each iteration, as set by the user */
- inline int
- getNumberOfSamples () const
- {
- return (nr_samples_);
- }
-
- /** \brief Set the number of neighbors to use when selecting a random feature correspondence. A higher value will
- * add more randomness to the feature matching.
- * \param k the number of neighbors to use when selecting a random feature correspondence.
- */
- inline void
- setCorrespondenceRandomness (int k)
- {
- k_correspondences_ = k;
- }
-
- /** \brief Get the number of neighbors used when selecting a random feature correspondence, as set by the user */
- inline int
- getCorrespondenceRandomness () const
- {
- return (k_correspondences_);
- }
-
- /** \brief Set the similarity threshold in [0,1[ between edge lengths of the underlying polygonal correspondence rejector object,
- * where 1 is a perfect match
- * \param similarity_threshold edge length similarity threshold
- */
- inline void
- setSimilarityThreshold (float similarity_threshold)
- {
- correspondence_rejector_poly_->setSimilarityThreshold (similarity_threshold);
- }
-
- /** \brief Get the similarity threshold between edge lengths of the underlying polygonal correspondence rejector object,
- * \return edge length similarity threshold
- */
- inline float
- getSimilarityThreshold () const
- {
- return correspondence_rejector_poly_->getSimilarityThreshold ();
- }
-
- /** \brief Set the required inlier fraction (of the input)
- * \param inlier_fraction required inlier fraction, must be in [0,1]
- */
- inline void
- setInlierFraction (float inlier_fraction)
- {
- inlier_fraction_ = inlier_fraction;
- }
-
- /** \brief Get the required inlier fraction
- * \return required inlier fraction in [0,1]
- */
- inline float
- getInlierFraction () const
- {
- return inlier_fraction_;
- }
-
- /** \brief Get the inlier indices of the source point cloud under the final transformation
- * @return inlier indices
- */
- inline const std::vector<int>&
- getInliers () const
- {
- return inliers_;
- }
-
- protected:
- /** \brief Choose a random index between 0 and n-1
- * \param n the number of possible indices to choose from
- */
- inline int
- getRandomIndex (int n) const
- {
- return (static_cast<int> (n * (rand () / (RAND_MAX + 1.0))));
- };
-
- /** \brief Select \a nr_samples sample points from cloud while making sure that their pairwise distances are
- * greater than a user-defined minimum distance, \a min_sample_distance.
- * \param cloud the input point cloud
- * \param nr_samples the number of samples to select
- * \param sample_indices the resulting sample indices
- */
- void
- selectSamples (const PointCloudSource &cloud, int nr_samples, std::vector<int> &sample_indices);
-
- /** \brief For each of the sample points, find a list of points in the target cloud whose features are similar to
- * the sample points' features. From these, select one randomly which will be considered that sample point's
- * correspondence.
- * \param sample_indices the indices of each sample point
- * \param similar_features correspondence cache, which is used to read/write already computed correspondences
- * \param corresponding_indices the resulting indices of each sample's corresponding point in the target cloud
- */
- void
- findSimilarFeatures (const std::vector<int> &sample_indices,
- std::vector<std::vector<int> >& similar_features,
- std::vector<int> &corresponding_indices);
-
- /** \brief Rigid transformation computation method.
- * \param output the transformed input point cloud dataset using the rigid transformation found
- * \param guess The computed transformation
- */
- void
- computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess) override;
-
- /** \brief Obtain the fitness of a transformation
- * The following metrics are calculated, based on
- * \b final_transformation_ and \b corr_dist_threshold_:
- * - Inliers: the number of transformed points which are closer than threshold to NN
- * - Error score: the MSE of the inliers
- * \param inliers indices of source point cloud inliers
- * \param fitness_score output fitness score as RMSE
- */
- void
- getFitness (std::vector<int>& inliers, float& fitness_score);
-
- /** \brief The source point cloud's feature descriptors. */
- FeatureCloudConstPtr input_features_;
-
- /** \brief The target point cloud's feature descriptors. */
- FeatureCloudConstPtr target_features_;
-
- /** \brief The number of samples to use during each iteration. */
- int nr_samples_;
-
- /** \brief The number of neighbors to use when selecting a random feature correspondence. */
- int k_correspondences_;
-
- /** \brief The KdTree used to compare feature descriptors. */
- FeatureKdTreePtr feature_tree_;
-
- /** \brief The polygonal correspondence rejector used for prerejection */
- CorrespondenceRejectorPolyPtr correspondence_rejector_poly_;
-
- /** \brief The fraction [0,1] of inlier points required for accepting a transformation */
- float inlier_fraction_;
-
- /** \brief Inlier points of final transformation as indices into source */
- std::vector<int> inliers_;
+ return (nr_samples_);
+ }
+
+ /** \brief Set the number of neighbors to use when selecting a random feature
+ * correspondence. A higher value will add more randomness to the feature matching.
+ * \param k the number of neighbors to use when selecting a random feature
+ * correspondence.
+ */
+ inline void
+ setCorrespondenceRandomness(int k)
+ {
+ k_correspondences_ = k;
+ }
+
+ /** \brief Get the number of neighbors used when selecting a random feature
+ * correspondence, as set by the user */
+ inline int
+ getCorrespondenceRandomness() const
+ {
+ return (k_correspondences_);
+ }
+
+ /** \brief Set the similarity threshold in [0,1[ between edge lengths of the
+ * underlying polygonal correspondence rejector object, where 1 is a perfect match
+ * \param similarity_threshold edge length similarity threshold
+ */
+ inline void
+ setSimilarityThreshold(float similarity_threshold)
+ {
+ correspondence_rejector_poly_->setSimilarityThreshold(similarity_threshold);
+ }
+
+ /** \brief Get the similarity threshold between edge lengths of the underlying
+ * polygonal correspondence rejector object, \return edge length similarity threshold
+ */
+ inline float
+ getSimilarityThreshold() const
+ {
+ return correspondence_rejector_poly_->getSimilarityThreshold();
+ }
+
+ /** \brief Set the required inlier fraction (of the input)
+ * \param inlier_fraction required inlier fraction, must be in [0,1]
+ */
+ inline void
+ setInlierFraction(float inlier_fraction)
+ {
+ inlier_fraction_ = inlier_fraction;
+ }
+
+ /** \brief Get the required inlier fraction
+ * \return required inlier fraction in [0,1]
+ */
+ inline float
+ getInlierFraction() const
+ {
+ return inlier_fraction_;
+ }
+
+ /** \brief Get the inlier indices of the source point cloud under the final
+ * transformation
+ * @return inlier indices
+ */
+ inline const pcl::Indices&
+ getInliers() const
+ {
+ return inliers_;
+ }
+
+protected:
+ /** \brief Choose a random index between 0 and n-1
+ * \param n the number of possible indices to choose from
+ */
+ inline int
+ getRandomIndex(int n) const
+ {
+ return (static_cast<int>(n * (rand() / (RAND_MAX + 1.0))));
};
-}
+
+  /** \brief Select \a nr_samples sample points from cloud while making sure that their
+   * pairwise distances are greater than a user-defined minimum distance, \a
+   * min_sample_distance.
+   * \param cloud the input point cloud
+   * \param nr_samples the number of samples to select
+   * \param sample_indices the resulting sample indices
+   */
+ void
+ selectSamples(const PointCloudSource& cloud,
+ int nr_samples,
+ pcl::Indices& sample_indices);
+
+  /** \brief For each of the sample points, find a list of points in the target cloud
+   * whose features are similar to the sample points' features. From these, select one
+   * randomly which will be considered that sample point's correspondence.
+   * \param sample_indices the indices of each sample point
+   * \param similar_features correspondence cache, which is used to read/write already
+   * computed correspondences
+   * \param corresponding_indices the resulting indices of each sample's corresponding
+   * point in the target cloud
+   */
+ void
+ findSimilarFeatures(const pcl::Indices& sample_indices,
+ std::vector<pcl::Indices>& similar_features,
+ pcl::Indices& corresponding_indices);
+
+ /** \brief Rigid transformation computation method.
+   * \param output the transformed input point cloud dataset using the rigid
+   * transformation found
+   * \param guess the initial transformation guess used to seed the alignment
+   */
+ void
+ computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess) override;
+
+ /** \brief Obtain the fitness of a transformation
+ * The following metrics are calculated, based on
+ * \b final_transformation_ and \b corr_dist_threshold_:
+ * - Inliers: the number of transformed points which are closer than threshold to NN
+ * - Error score: the MSE of the inliers
+ * \param inliers indices of source point cloud inliers
+ * \param fitness_score output fitness score as RMSE
+ */
+ void
+ getFitness(pcl::Indices& inliers, float& fitness_score);
+
+ /** \brief The source point cloud's feature descriptors. */
+ FeatureCloudConstPtr input_features_;
+
+ /** \brief The target point cloud's feature descriptors. */
+ FeatureCloudConstPtr target_features_;
+
+ /** \brief The number of samples to use during each iteration. */
+ int nr_samples_;
+
+ /** \brief The number of neighbors to use when selecting a random feature
+ * correspondence. */
+ int k_correspondences_;
+
+ /** \brief The KdTree used to compare feature descriptors. */
+ FeatureKdTreePtr feature_tree_;
+
+ /** \brief The polygonal correspondence rejector used for prerejection */
+ CorrespondenceRejectorPolyPtr correspondence_rejector_poly_;
+
+ /** \brief The fraction [0,1] of inlier points required for accepting a transformation
+ */
+ float inlier_fraction_;
+
+ /** \brief Inlier points of final transformation as indices into source */
+ pcl::Indices inliers_;
+};
+} // namespace pcl
#include <pcl/registration/impl/sample_consensus_prerejective.hpp>
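The class above follows the usual Registration workflow, with the extra feature clouds and prerejection parameters described in its doxygen comments. A minimal usage sketch, assuming PointNormal clouds and FPFHSignature33 features computed beforehand; the function name and parameter values are illustrative only.

```cpp
// Illustrative sketch of prerejective RANSAC alignment. Clouds and features are
// assumed to be precomputed; all names and parameter values are examples only.
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/sample_consensus_prerejective.h>

using PointNT = pcl::PointNormal;
using FeatureT = pcl::FPFHSignature33;

Eigen::Matrix4f
align_prerejective(const pcl::PointCloud<PointNT>::Ptr& object,
                   const pcl::PointCloud<PointNT>::Ptr& scene,
                   const pcl::PointCloud<FeatureT>::Ptr& object_features,
                   const pcl::PointCloud<FeatureT>::Ptr& scene_features)
{
  pcl::SampleConsensusPrerejective<PointNT, PointNT, FeatureT> align;
  align.setInputSource(object);
  align.setSourceFeatures(object_features);
  align.setInputTarget(scene);
  align.setTargetFeatures(scene_features);
  align.setMaximumIterations(50000);         // RANSAC iterations
  align.setNumberOfSamples(3);               // points sampled per pose hypothesis
  align.setCorrespondenceRandomness(5);      // nearest features considered
  align.setSimilarityThreshold(0.9f);        // polygonal prerejection threshold
  align.setMaxCorrespondenceDistance(0.01f); // inlier distance threshold
  align.setInlierFraction(0.25f);            // required inlier fraction
  pcl::PointCloud<PointNT> object_aligned;
  align.align(object_aligned);
  return align.getFinalTransformation();
}
```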
#pragma once
-#include <pcl/correspondence.h>
#include <pcl/common/transforms.h>
#include <pcl/registration/correspondence_types.h>
+#include <pcl/correspondence.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief TransformationEstimation represents the base class for methods for transformation estimation based on:
- * - correspondence vectors
- * - two point clouds (source and target) of the same size
- * - a point cloud with a set of indices (source), and another point cloud (target)
- * - two point clouds with two sets of indices (source and target) of the same size
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Dirk Holz, Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimation
- {
- public:
- using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
-
- TransformationEstimation () {};
- virtual ~TransformationEstimation () {};
+namespace pcl {
+namespace registration {
+/** \brief TransformationEstimation represents the base class for methods for
+ * transformation estimation based on:
+ * - correspondence vectors
+ * - two point clouds (source and target) of the same size
+ * - a point cloud with a set of indices (source), and another point cloud (target)
+ * - two point clouds with two sets of indices (source and target) of the same size
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Dirk Holz, Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimation {
+public:
+ using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- virtual void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const = 0;
+ TransformationEstimation(){};
+ virtual ~TransformationEstimation(){};
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- virtual void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const = 0;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud. \param[in] cloud_src the source point cloud dataset \param[in]
+ * cloud_tgt the target point cloud dataset \param[out] transformation_matrix the
+ * resultant transformation matrix
+ */
+ virtual void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const = 0;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- virtual void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const = 0;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud. \param[in] cloud_src the source point cloud dataset \param[in]
+ * indices_src the vector of indices describing the points of interest in \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ virtual void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const = 0;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- virtual void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const = 0;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud. \param[in] cloud_src the source point cloud dataset \param[in]
+ * indices_src the vector of indices describing the points of interest in \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ virtual void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const = 0;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud. \param[in] cloud_src the source point cloud dataset \param[in]
+ * cloud_tgt the target point cloud dataset \param[in] correspondences the vector of
+ * correspondences between source and target point cloud \param[out]
+ * transformation_matrix the resultant transformation matrix
+ */
+ virtual void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const = 0;
- using Ptr = shared_ptr<TransformationEstimation<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimation<PointSource, PointTarget, Scalar> >;
- };
- }
-}
+ using Ptr = shared_ptr<TransformationEstimation<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationEstimation<PointSource, PointTarget, Scalar>>;
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/transformation_estimation.h>
-namespace pcl
-{
- namespace registration
- {
- /** @b TransformationEstimation2D implements a simple 2D rigid transformation
- * estimation (x, y, theta) for a given pair of datasets.
- *
- * The two datasets should already be transformed so that the reference plane
- * equals z = 0.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- *
- * \author Suat Gedikli
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimation2D : public TransformationEstimation<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimation2D<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimation2D<PointSource, PointTarget, Scalar> >;
-
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
+namespace pcl {
+namespace registration {
+/** @b TransformationEstimation2D implements a simple 2D rigid transformation
+ * estimation (x, y, theta) for a given pair of datasets.
+ *
+ * The two datasets should already be transformed so that the reference plane
+ * equals z = 0.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ *
+ * \author Suat Gedikli
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimation2D
+: public TransformationEstimation<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<TransformationEstimation2D<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationEstimation2D<PointSource, PointTarget, Scalar>>;
- TransformationEstimation2D () {};
- virtual ~TransformationEstimation2D () {};
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
- /** \brief Estimate a rigid transformation between a source and a target point cloud in 2D.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const;
+ TransformationEstimation2D(){};
+ virtual ~TransformationEstimation2D(){};
- /** \brief Estimate a rigid transformation between a source and a target point cloud in 2D.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid transformation between a source and a target point cloud
+ * in 2D. \param[in] cloud_src the source point cloud dataset \param[in] cloud_tgt the
+ * target point cloud dataset \param[out] transformation_matrix the resultant
+ * transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const;
- /** \brief Estimate a rigid transformation between a source and a target point cloud in 2D.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- virtual void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid transformation between a source and a target point cloud
+ * in 2D. \param[in] cloud_src the source point cloud dataset \param[in] indices_src
+ * the vector of indices describing the points of interest in \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const;
- /** \brief Estimate a rigid transformation between a source and a target point cloud in 2D.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- virtual void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid transformation between a source and a target point cloud
+ * in 2D. \param[in] cloud_src the source point cloud dataset \param[in] indices_src
+ * the vector of indices describing the points of interest in \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ virtual void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const;
- protected:
+ /** \brief Estimate a rigid transformation between a source and a target point cloud
+ * in 2D. \param[in] cloud_src the source point cloud dataset \param[in] cloud_tgt the
+ * target point cloud dataset \param[in] correspondences the vector of correspondences
+ * between source and target point cloud \param[out] transformation_matrix the
+ * resultant transformation matrix
+ */
+ virtual void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const;
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const;
+protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * \param[in] source_it an iterator over the source point cloud dataset
+ * \param[in] target_it an iterator over the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const;
- /** \brief Obtain a 4x4 rigid transformation matrix from a correlation matrix H = src * tgt'
- * \param[in] cloud_src_demean the input source cloud, demeaned, in Eigen format
- * \param[in] centroid_src the input source centroid, in Eigen format
- * \param[in] cloud_tgt_demean the input target cloud, demeaned, in Eigen format
- * \param[in] centroid_tgt the input target cloud, in Eigen format
- * \param[out] transformation_matrix the resultant 4x4 rigid transformation matrix
- */
- void
- getTransformationFromCorrelation (
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_src_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_src,
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_tgt_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_tgt,
- Matrix4 &transformation_matrix) const;
- };
+  /** \brief Obtain a 4x4 rigid transformation matrix from a correlation matrix
+   * H = src * tgt'
+   * \param[in] cloud_src_demean the input source cloud, demeaned, in Eigen format
+   * \param[in] centroid_src the input source centroid, in Eigen format
+   * \param[in] cloud_tgt_demean the input target cloud, demeaned, in Eigen format
+   * \param[in] centroid_tgt the input target centroid, in Eigen format
+   * \param[out] transformation_matrix the resultant 4x4 rigid transformation matrix
+   */
+ void
+ getTransformationFromCorrelation(
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_src_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_src,
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_tgt_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_tgt,
+ Matrix4& transformation_matrix) const;
+};
- }
-}
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_2D.hpp>
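Since TransformationEstimation2D is a concrete estimator, it can also be used stand-alone on two corresponding clouds that already lie in the z = 0 plane. A minimal sketch with illustrative names, assuming the clouds correspond point-by-point and have equal size.

```cpp
// Illustrative sketch: stand-alone planar (x, y, theta) estimation. Both clouds are
// assumed to lie in z = 0, correspond point-by-point, and have the same size.
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/transformation_estimation_2D.h>

Eigen::Matrix4f
estimate_2d(const pcl::PointCloud<pcl::PointXYZ>& src,
            const pcl::PointCloud<pcl::PointXYZ>& tgt)
{
  pcl::registration::TransformationEstimation2D<pcl::PointXYZ, pcl::PointXYZ> est;
  Eigen::Matrix4f transform;
  est.estimateRigidTransformation(src, tgt, transform);
  return transform;
}
```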
#include <pcl/registration/transformation_estimation.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief TransformationEstimation3Points represents the class for transformation estimation based on:
- * - correspondence vectors of 3 pairs (planar case)
- * - two point clouds (source and target) of size 3
- * - a point cloud with a set of 3 indices (source), and another point cloud (target)
- * - two point clouds with two sets of indices (source and target) of the size 3
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix
- * (i.e., float or double). Default: float.
- *
- * \author P.W.Theiler
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimation3Point : public TransformationEstimation <PointSource, PointTarget, Scalar>
- {
- public:
- /** \cond */
- using Ptr = shared_ptr<TransformationEstimation3Point<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimation3Point<PointSource, PointTarget, Scalar> >;
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
- /** \endcond */
-
- /** \brief Constructor */
- TransformationEstimation3Point ()
- {};
+namespace pcl {
+namespace registration {
+/** \brief TransformationEstimation3Point represents the class for transformation
+ * estimation based on:
+ * - correspondence vectors of 3 pairs (planar case)
+ * - two point clouds (source and target) of size 3
+ * - a point cloud with a set of 3 indices (source), and another point cloud (target)
+ * - two point clouds with two sets of indices (source and target) of the size 3
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ *
+ * \author P.W.Theiler
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimation3Point
+: public TransformationEstimation<PointSource, PointTarget, Scalar> {
+public:
+ /** \cond */
+ using Ptr =
+ shared_ptr<TransformationEstimation3Point<PointSource, PointTarget, Scalar>>;
+ using ConstPtr = shared_ptr<
+ const TransformationEstimation3Point<PointSource, PointTarget, Scalar>>;
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
+ /** \endcond */
- /** \brief Destructor */
- ~TransformationEstimation3Point ()
- {};
+ /** \brief Constructor */
+ TransformationEstimation3Point(){};
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Destructor */
+ ~TransformationEstimation3Point(){};
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud. \param[in] cloud_src the source point cloud dataset \param[in]
+ * cloud_tgt the target point cloud dataset \param[out] transformation_matrix the
+ * resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud. \param[in] cloud_src the source point cloud dataset \param[in]
+ * indices_src the vector of indices describing the points of interest in \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud. \param[in] cloud_src the source point cloud dataset \param[in]
+ * indices_src the vector of indices describing the points of interest in \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const override;
- protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud. \param[in] cloud_src the source point cloud dataset \param[in]
+ * cloud_tgt the target point cloud dataset \param[in] correspondences the vector of
+ * correspondences between source and target point cloud \param[out]
+ * transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const override;
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const;
- };
- }; // namespace registration
-}; // namespace registration
+protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * \param[in] source_it an iterator over the source point cloud dataset
+ * \param[in] target_it an iterator over the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const;
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_3point.hpp>
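TransformationEstimation3Point is likewise usable stand-alone when exactly three corresponding points are available per cloud. A minimal sketch with illustrative names.

```cpp
// Illustrative sketch: estimating a rigid transform from exactly three point pairs.
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/transformation_estimation_3point.h>

Eigen::Matrix4f
estimate_from_three_points(const pcl::PointCloud<pcl::PointXYZ>& src3,
                           const pcl::PointCloud<pcl::PointXYZ>& tgt3)
{
  // Both clouds are assumed to contain the same three corresponding points.
  pcl::registration::TransformationEstimation3Point<pcl::PointXYZ, pcl::PointXYZ> est;
  Eigen::Matrix4f transform;
  est.estimateRigidTransformation(src3, tgt3, transform);
  return transform;
}
```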
#include <pcl/registration/transformation_estimation.h>
#include <pcl/cloud_iterator.h>
+PCL_DEPRECATED_HEADER(1,
+ 15,
+ "TransformationEstimationDQ has been renamed to "
+ "TransformationEstimationDualQuaternion.");
-namespace pcl
-{
- namespace registration
- {
- /** @b TransformationEstimationDQ implements dual quaternion based estimation of
- * the transformation aligning the given correspondences.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Sergey Zagoruyko
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimationDQ : public TransformationEstimation<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimationDQ<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationDQ<PointSource, PointTarget, Scalar> >;
-
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
+namespace pcl {
+namespace registration {
+/** @b TransformationEstimationDQ implements dual quaternion based estimation of
+ * the transformation aligning the given correspondences.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Sergey Zagoruyko
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimationDQ
+: public TransformationEstimation<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<TransformationEstimationDQ<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationEstimationDQ<PointSource, PointTarget, Scalar>>;
- TransformationEstimationDQ () {};
- virtual ~TransformationEstimationDQ () {};
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using
- * dual quaternion optimization
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const;
+ TransformationEstimationDQ(){};
+ virtual ~TransformationEstimationDQ(){};
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using
- * dual quaternion optimization
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using dual quaternion optimization \param[in] cloud_src the source
+ * point cloud dataset \param[in] cloud_tgt the target point cloud dataset \param[out]
+ * transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using
- * dual quaternion optimization
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using dual quaternion optimization \param[in] cloud_src the source
+ * point cloud dataset \param[in] indices_src the vector of indices describing the
+ * points of interest in \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using
- * dual quaternion optimization
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using dual quaternion optimization \param[in] cloud_src the source
+ * point cloud dataset \param[in] indices_src the vector of indices describing the
+ * points of interest in \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const;
- protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using dual quaternion optimization \param[in] cloud_src the source
+ * point cloud dataset \param[in] cloud_tgt the target point cloud dataset \param[in]
+ * correspondences the vector of correspondences between source and target point cloud
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const;
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const;
- };
+protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * \param[in] source_it an iterator over the source point cloud dataset
+ * \param[in] target_it an iterator over the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const;
+};
- }
-}
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_dq.hpp>
#include <pcl/registration/transformation_estimation.h>
#include <pcl/cloud_iterator.h>
-namespace pcl
-{
- namespace registration
- {
- /** @b TransformationEstimationDualQuaternion implements dual quaternion based estimation of
- * the transformation aligning the given correspondences.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Sergey Zagoruyko
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimationDualQuaternion : public TransformationEstimation<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar> >;
-
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
+namespace pcl {
+namespace registration {
+/** @b TransformationEstimationDualQuaternion implements dual quaternion based
+ * estimation of the transformation aligning the given correspondences.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Sergey Zagoruyko
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimationDualQuaternion
+: public TransformationEstimation<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<
+ TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>>;
+ using ConstPtr = shared_ptr<
+ const TransformationEstimationDualQuaternion<PointSource, PointTarget, Scalar>>;
- TransformationEstimationDualQuaternion () {};
- ~TransformationEstimationDualQuaternion () {};
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using
- * dual quaternion optimization
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
+ TransformationEstimationDualQuaternion(){};
+ ~TransformationEstimationDualQuaternion(){};
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using
- * dual quaternion optimization
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using dual quaternion optimization
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using
- * dual quaternion optimization
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using dual quaternion optimization
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using
- * dual quaternion optimization
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using dual quaternion optimization
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const override;
- protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using dual quaternion optimization
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] correspondences the vector of correspondences between source and target
+ * point cloud
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const override;
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const;
- };
+protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * \param[in] source_it an iterator over the source point cloud dataset
+ * \param[in] target_it an iterator over the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const;
+};
- }
-}
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_dual_quaternion.hpp>
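For orientation, a minimal usage sketch of the dual-quaternion estimator follows (illustrative only, not part of the diff). It assumes PointXYZ clouds and precomputed correspondences; the helper function name is hypothetical, and only overloads declared above are used.

#include <pcl/correspondence.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/transformation_estimation_dual_quaternion.h>

// Hypothetical helper: estimate the rigid motion mapping src onto tgt from given matches.
inline Eigen::Matrix4f
alignWithDualQuaternion(const pcl::PointCloud<pcl::PointXYZ>& src,
                        const pcl::PointCloud<pcl::PointXYZ>& tgt,
                        const pcl::Correspondences& correspondences)
{
  // Scalar defaults to float, so Matrix4 resolves to Eigen::Matrix4f.
  pcl::registration::TransformationEstimationDualQuaternion<pcl::PointXYZ, pcl::PointXYZ> est;
  Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();
  est.estimateRigidTransformation(src, tgt, correspondences, transform);
  return transform;
}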
#pragma once
-#include <pcl/memory.h>
#include <pcl/registration/transformation_estimation.h>
#include <pcl/registration/warp_point_rigid.h>
+#include <pcl/memory.h>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** @b TransformationEstimationLM implements Levenberg Marquardt-based
+ * estimation of the transformation aligning the given correspondences.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename MatScalar = float>
+class TransformationEstimationLM
+: public TransformationEstimation<PointSource, PointTarget, MatScalar> {
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+public:
+ using Ptr =
+ shared_ptr<TransformationEstimationLM<PointSource, PointTarget, MatScalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationEstimationLM<PointSource, PointTarget, MatScalar>>;
+
+ using VectorX = Eigen::Matrix<MatScalar, Eigen::Dynamic, 1>;
+ using Vector4 = Eigen::Matrix<MatScalar, 4, 1>;
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, MatScalar>::Matrix4;
+
+ /** \brief Constructor. */
+ TransformationEstimationLM();
+
+ /** \brief Copy constructor.
+ * \param[in] src the TransformationEstimationLM object to copy into this
+ */
+ TransformationEstimationLM(const TransformationEstimationLM& src)
+ : tmp_src_(src.tmp_src_)
+ , tmp_tgt_(src.tmp_tgt_)
+ , tmp_idx_src_(src.tmp_idx_src_)
+ , tmp_idx_tgt_(src.tmp_idx_tgt_)
+ , warp_point_(src.warp_point_){};
+
+ /** \brief Copy operator.
+ * \param[in] src the TransformationEstimationLM object to copy into this
+ */
+ TransformationEstimationLM&
+ operator=(const TransformationEstimationLM& src)
{
- /** @b TransformationEstimationLM implements Levenberg Marquardt-based
- * estimation of the transformation aligning the given correspondences.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename MatScalar = float>
- class TransformationEstimationLM : public TransformationEstimation<PointSource, PointTarget, MatScalar>
- {
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- public:
- using Ptr = shared_ptr<TransformationEstimationLM<PointSource, PointTarget, MatScalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationLM<PointSource, PointTarget, MatScalar> >;
-
- using VectorX = Eigen::Matrix<MatScalar, Eigen::Dynamic, 1>;
- using Vector4 = Eigen::Matrix<MatScalar, 4, 1>;
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, MatScalar>::Matrix4;
-
- /** \brief Constructor. */
- TransformationEstimationLM ();
-
- /** \brief Copy constructor.
- * \param[in] src the TransformationEstimationLM object to copy into this
- */
- TransformationEstimationLM (const TransformationEstimationLM &src) :
- tmp_src_ (src.tmp_src_),
- tmp_tgt_ (src.tmp_tgt_),
- tmp_idx_src_ (src.tmp_idx_src_),
- tmp_idx_tgt_ (src.tmp_idx_tgt_),
- warp_point_ (src.warp_point_)
- {};
-
- /** \brief Copy operator.
- * \param[in] src the TransformationEstimationLM object to copy into this
- */
- TransformationEstimationLM&
- operator = (const TransformationEstimationLM &src)
- {
- tmp_src_ = src.tmp_src_;
- tmp_tgt_ = src.tmp_tgt_;
- tmp_idx_src_ = src.tmp_idx_src_;
- tmp_idx_tgt_ = src.tmp_idx_tgt_;
- warp_point_ = src.warp_point_;
- }
-
- /** \brief Destructor. */
- ~TransformationEstimationLM () {};
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using LM.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using LM.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using LM.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from
- * \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using LM.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Set the function we use to warp points. Defaults to rigid 6D warp.
- * \param[in] warp_fcn a shared pointer to an object that warps points
- */
- void
- setWarpFunction (const typename WarpPointRigid<PointSource, PointTarget, MatScalar>::Ptr &warp_fcn)
- {
- warp_point_ = warp_fcn;
- }
-
- protected:
- /** \brief Compute the distance between a source point and its corresponding target point
- * \param[in] p_src The source point
- * \param[in] p_tgt The target point
- * \return The distance between \a p_src and \a p_tgt
- *
- * \note Older versions of PCL used this method internally for calculating the
- * optimization gradient. Since PCL 1.7, a switch has been made to the
- * computeDistance method using Vector4 types instead. This method is only
- * kept for API compatibility reasons.
- */
- virtual MatScalar
- computeDistance (const PointSource &p_src, const PointTarget &p_tgt) const
- {
- Vector4 s (p_src.x, p_src.y, p_src.z, 0);
- Vector4 t (p_tgt.x, p_tgt.y, p_tgt.z, 0);
- return ((s - t).norm ());
- }
-
- /** \brief Compute the distance between a source point and its corresponding target point
- * \param[in] p_src The source point
- * \param[in] p_tgt The target point
- * \return The distance between \a p_src and \a p_tgt
- *
- * \note A different distance function can be defined by creating a subclass of
- * TransformationEstimationLM and overriding this method.
- * (See \a TransformationEstimationPointToPlane)
- */
- virtual MatScalar
- computeDistance (const Vector4 &p_src, const PointTarget &p_tgt) const
- {
- Vector4 t (p_tgt.x, p_tgt.y, p_tgt.z, 0);
- return ((p_src - t).norm ());
- }
-
- /** \brief Temporary pointer to the source dataset. */
- mutable const PointCloudSource *tmp_src_;
-
- /** \brief Temporary pointer to the target dataset. */
- mutable const PointCloudTarget *tmp_tgt_;
-
- /** \brief Temporary pointer to the source dataset indices. */
- mutable const std::vector<int> *tmp_idx_src_;
-
- /** \brief Temporary pointer to the target dataset indices. */
- mutable const std::vector<int> *tmp_idx_tgt_;
-
- /** \brief The parameterized function used to warp the source to the target. */
- typename pcl::registration::WarpPointRigid<PointSource, PointTarget, MatScalar>::Ptr warp_point_;
-
- /** Base functor all the models that need non linear optimization must
- * define their own one and implement operator() (const Eigen::VectorXd& x, Eigen::VectorXd& fvec)
- * or operator() (const Eigen::VectorXf& x, Eigen::VectorXf& fvec) depending on the chosen _Scalar
- */
- template<typename _Scalar, int NX=Eigen::Dynamic, int NY=Eigen::Dynamic>
- struct Functor
- {
- using Scalar = _Scalar;
- enum
- {
- InputsAtCompileTime = NX,
- ValuesAtCompileTime = NY
- };
- using InputType = Eigen::Matrix<_Scalar,InputsAtCompileTime,1>;
- using ValueType = Eigen::Matrix<_Scalar,ValuesAtCompileTime,1>;
- using JacobianType = Eigen::Matrix<_Scalar,ValuesAtCompileTime,InputsAtCompileTime>;
-
- /** \brief Empty Constructor. */
- Functor () : m_data_points_ (ValuesAtCompileTime) {}
-
- /** \brief Constructor
- * \param[in] m_data_points number of data points to evaluate.
- */
- Functor (int m_data_points) : m_data_points_ (m_data_points) {}
-
- /** \brief Destructor. */
- virtual ~Functor () {}
-
- /** \brief Get the number of values. */
- int
- values () const { return (m_data_points_); }
-
- protected:
- int m_data_points_;
- };
-
- struct OptimizationFunctor : public Functor<MatScalar>
- {
- using Functor<MatScalar>::values;
-
- /** Functor constructor
- * \param[in] m_data_points the number of data points to evaluate
- * \param[in,out] estimator pointer to the estimator object
- */
- OptimizationFunctor (int m_data_points,
- const TransformationEstimationLM *estimator)
- : Functor<MatScalar> (m_data_points), estimator_ (estimator)
- {}
-
- /** Copy constructor
- * \param[in] src the optimization functor to copy into this
- */
- inline OptimizationFunctor (const OptimizationFunctor &src) :
- Functor<MatScalar> (src.m_data_points_), estimator_ ()
- {
- *this = src;
- }
-
- /** Copy operator
- * \param[in] src the optimization functor to copy into this
- */
- inline OptimizationFunctor&
- operator = (const OptimizationFunctor &src)
- {
- Functor<MatScalar>::operator=(src);
- estimator_ = src.estimator_;
- return (*this);
- }
-
- /** \brief Destructor. */
- ~OptimizationFunctor () {}
-
- /** Fill fvec from x. For the current state vector x fill the f values
- * \param[in] x state vector
- * \param[out] fvec f values vector
- */
- int
- operator () (const VectorX &x, VectorX &fvec) const;
-
- const TransformationEstimationLM<PointSource, PointTarget, MatScalar> *estimator_;
- };
-
- struct OptimizationFunctorWithIndices : public Functor<MatScalar>
- {
- using Functor<MatScalar>::values;
-
- /** Functor constructor
- * \param[in] m_data_points the number of data points to evaluate
- * \param[in,out] estimator pointer to the estimator object
- */
- OptimizationFunctorWithIndices (int m_data_points,
- const TransformationEstimationLM *estimator)
- : Functor<MatScalar> (m_data_points), estimator_ (estimator)
- {}
-
- /** Copy constructor
- * \param[in] src the optimization functor to copy into this
- */
- inline OptimizationFunctorWithIndices (const OptimizationFunctorWithIndices &src)
- : Functor<MatScalar> (src.m_data_points_), estimator_ ()
- {
- *this = src;
- }
-
- /** Copy operator
- * \param[in] src the optimization functor to copy into this
- */
- inline OptimizationFunctorWithIndices&
- operator = (const OptimizationFunctorWithIndices &src)
- {
- Functor<MatScalar>::operator=(src);
- estimator_ = src.estimator_;
- return (*this);
- }
-
- /** \brief Destructor. */
- ~OptimizationFunctorWithIndices () {}
-
- /** Fill fvec from x. For the current state vector x fill the f values
- * \param[in] x state vector
- * \param[out] fvec f values vector
- */
- int
- operator () (const VectorX &x, VectorX &fvec) const;
-
- const TransformationEstimationLM<PointSource, PointTarget, MatScalar> *estimator_;
- };
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ tmp_src_ = src.tmp_src_;
+ tmp_tgt_ = src.tmp_tgt_;
+ tmp_idx_src_ = src.tmp_idx_src_;
+ tmp_idx_tgt_ = src.tmp_idx_tgt_;
+ warp_point_ = src.warp_point_;
+ }
+
+ /** \brief Destructor. */
+ ~TransformationEstimationLM(){};
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LM.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LM.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LM.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LM.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] correspondences the vector of correspondences between source and target
+ * point cloud
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Set the function we use to warp points. Defaults to rigid 6D warp.
+ * \param[in] warp_fcn a shared pointer to an object that warps points
+ */
+ void
+ setWarpFunction(
+ const typename WarpPointRigid<PointSource, PointTarget, MatScalar>::Ptr& warp_fcn)
+ {
+ warp_point_ = warp_fcn;
+ }
+
+protected:
+ /** \brief Compute the distance between a source point and its corresponding target
+ * point
+ * \param[in] p_src The source point
+ * \param[in] p_tgt The target point
+ * \return The distance between \a p_src and \a p_tgt
+ *
+ * \note Older versions of PCL used this method internally for calculating the
+ * optimization gradient. Since PCL 1.7, a switch has been made to the
+ * computeDistance method using Vector4 types instead. This method is only
+ * kept for API compatibility reasons.
+ */
+ virtual MatScalar
+ computeDistance(const PointSource& p_src, const PointTarget& p_tgt) const
+ {
+ Vector4 s(p_src.x, p_src.y, p_src.z, 0);
+ Vector4 t(p_tgt.x, p_tgt.y, p_tgt.z, 0);
+ return ((s - t).norm());
}
-}
+
+ /** \brief Compute the distance between a source point and its corresponding target
+ * point
+ * \param[in] p_src The source point
+ * \param[in] p_tgt The target point
+ * \return The distance between \a p_src and \a p_tgt
+ *
+ * \note A different distance function can be defined by creating a subclass of
+ * TransformationEstimationLM and overriding this method.
+ * (See \a TransformationEstimationPointToPlane)
+ */
+ virtual MatScalar
+ computeDistance(const Vector4& p_src, const PointTarget& p_tgt) const
+ {
+ Vector4 t(p_tgt.x, p_tgt.y, p_tgt.z, 0);
+ return ((p_src - t).norm());
+ }
+
+ /** \brief Temporary pointer to the source dataset. */
+ mutable const PointCloudSource* tmp_src_;
+
+ /** \brief Temporary pointer to the target dataset. */
+ mutable const PointCloudTarget* tmp_tgt_;
+
+ /** \brief Temporary pointer to the source dataset indices. */
+ mutable const pcl::Indices* tmp_idx_src_;
+
+ /** \brief Temporary pointer to the target dataset indices. */
+ mutable const pcl::Indices* tmp_idx_tgt_;
+
+ /** \brief The parameterized function used to warp the source to the target. */
+ typename pcl::registration::WarpPointRigid<PointSource, PointTarget, MatScalar>::Ptr
+ warp_point_;
+
+ /** Base functor: all models that need non-linear optimization must define their own
+ * functor derived from this one and implement operator() (const Eigen::VectorXd& x,
+ * Eigen::VectorXd& fvec) or operator() (const Eigen::VectorXf& x, Eigen::VectorXf&
+ * fvec), depending on the chosen _Scalar
+ */
+ template <typename _Scalar, int NX = Eigen::Dynamic, int NY = Eigen::Dynamic>
+ struct Functor {
+ using Scalar = _Scalar;
+ enum { InputsAtCompileTime = NX, ValuesAtCompileTime = NY };
+ using InputType = Eigen::Matrix<_Scalar, InputsAtCompileTime, 1>;
+ using ValueType = Eigen::Matrix<_Scalar, ValuesAtCompileTime, 1>;
+ using JacobianType =
+ Eigen::Matrix<_Scalar, ValuesAtCompileTime, InputsAtCompileTime>;
+
+ /** \brief Empty Constructor. */
+ Functor() : m_data_points_(ValuesAtCompileTime) {}
+
+ /** \brief Constructor
+ * \param[in] m_data_points number of data points to evaluate.
+ */
+ Functor(int m_data_points) : m_data_points_(m_data_points) {}
+
+ /** \brief Destructor. */
+ virtual ~Functor() {}
+
+ /** \brief Get the number of values. */
+ int
+ values() const
+ {
+ return (m_data_points_);
+ }
+
+ protected:
+ int m_data_points_;
+ };
+
+ struct OptimizationFunctor : public Functor<MatScalar> {
+ using Functor<MatScalar>::values;
+
+ /** Functor constructor
+ * \param[in] m_data_points the number of data points to evaluate
+ * \param[in,out] estimator pointer to the estimator object
+ */
+ OptimizationFunctor(int m_data_points, const TransformationEstimationLM* estimator)
+ : Functor<MatScalar>(m_data_points), estimator_(estimator)
+ {}
+
+ /** Copy constructor
+ * \param[in] src the optimization functor to copy into this
+ */
+ inline OptimizationFunctor(const OptimizationFunctor& src)
+ : Functor<MatScalar>(src.m_data_points_), estimator_()
+ {
+ *this = src;
+ }
+
+ /** Copy operator
+ * \param[in] src the optimization functor to copy into this
+ */
+ inline OptimizationFunctor&
+ operator=(const OptimizationFunctor& src)
+ {
+ Functor<MatScalar>::operator=(src);
+ estimator_ = src.estimator_;
+ return (*this);
+ }
+
+ /** \brief Destructor. */
+ ~OptimizationFunctor() {}
+
+ /** Fill fvec from x. For the current state vector x fill the f values
+ * \param[in] x state vector
+ * \param[out] fvec f values vector
+ */
+ int
+ operator()(const VectorX& x, VectorX& fvec) const;
+
+ const TransformationEstimationLM<PointSource, PointTarget, MatScalar>* estimator_;
+ };
+
+ struct OptimizationFunctorWithIndices : public Functor<MatScalar> {
+ using Functor<MatScalar>::values;
+
+ /** Functor constructor
+ * \param[in] m_data_points the number of data points to evaluate
+ * \param[in,out] estimator pointer to the estimator object
+ */
+ OptimizationFunctorWithIndices(int m_data_points,
+ const TransformationEstimationLM* estimator)
+ : Functor<MatScalar>(m_data_points), estimator_(estimator)
+ {}
+
+ /** Copy constructor
+ * \param[in] src the optimization functor to copy into this
+ */
+ inline OptimizationFunctorWithIndices(const OptimizationFunctorWithIndices& src)
+ : Functor<MatScalar>(src.m_data_points_), estimator_()
+ {
+ *this = src;
+ }
+
+ /** Copy operator
+ * \param[in] src the optimization functor to copy into this
+ */
+ inline OptimizationFunctorWithIndices&
+ operator=(const OptimizationFunctorWithIndices& src)
+ {
+ Functor<MatScalar>::operator=(src);
+ estimator_ = src.estimator_;
+ return (*this);
+ }
+
+ /** \brief Destructor. */
+ ~OptimizationFunctorWithIndices() {}
+
+ /** Fill fvec from x. For the current state vector x fill the f values
+ * \param[in] x state vector
+ * \param[out] fvec f values vector
+ */
+ int
+ operator()(const VectorX& x, VectorX& fvec) const;
+
+ const TransformationEstimationLM<PointSource, PointTarget, MatScalar>* estimator_;
+ };
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_lm.hpp>
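The setWarpFunction hook above is the main extension point of the LM estimator. A hedged sketch of how it is typically wired into an ICP pipeline follows (illustrative, not part of the diff; it assumes the 3-DoF WarpPointRigid3D warp from pcl/registration/warp_point_rigid_3d.h is available, and the function name is hypothetical).

#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/transformation_estimation_lm.h>
#include <pcl/registration/warp_point_rigid_3d.h> // assumed: 3-DoF (x, y, yaw) warp

inline void
useLMEstimator(pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ>& icp)
{
  using EstimatorLM =
      pcl::registration::TransformationEstimationLM<pcl::PointXYZ, pcl::PointXYZ, float>;
  using Warp3D =
      pcl::registration::WarpPointRigid3D<pcl::PointXYZ, pcl::PointXYZ, float>;

  EstimatorLM::Ptr lm(new EstimatorLM);
  // Restrict the optimization to a planar motion instead of the default rigid 6D warp.
  lm->setWarpFunction(Warp3D::Ptr(new Warp3D));

  // Any pcl::Registration subclass can consume the estimator.
  icp.setTransformationEstimation(lm);
}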
#include <pcl/registration/transformation_estimation_lm.h>
#include <pcl/registration/warp_point_rigid.h>
-namespace pcl
-{
- namespace registration
- {
- /** @b TransformationEstimationPointToPlane uses Levenberg Marquardt optimization to find the
- * transformation that minimizes the point-to-plane distance between the given correspondences.
- *
- * \author Michael Dixon
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimationPointToPlane : public TransformationEstimationLM<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimationPointToPlane<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationPointToPlane<PointSource, PointTarget, Scalar> >;
-
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
+namespace pcl {
+namespace registration {
+/** @b TransformationEstimationPointToPlane uses Levenberg Marquardt optimization to
+ * find the transformation that minimizes the point-to-plane distance between the given
+ * correspondences.
+ *
+ * \author Michael Dixon
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimationPointToPlane
+: public TransformationEstimationLM<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<
+ TransformationEstimationPointToPlane<PointSource, PointTarget, Scalar>>;
+ using ConstPtr = shared_ptr<
+ const TransformationEstimationPointToPlane<PointSource, PointTarget, Scalar>>;
- using Vector4 = Eigen::Matrix<Scalar, 4, 1>;
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
- TransformationEstimationPointToPlane () {};
- ~TransformationEstimationPointToPlane () {};
+ using Vector4 = Eigen::Matrix<Scalar, 4, 1>;
- protected:
- Scalar
- computeDistance (const PointSource &p_src, const PointTarget &p_tgt) const override
- {
- // Compute the point-to-plane distance
- Vector4 s (p_src.x, p_src.y, p_src.z, 0);
- Vector4 t (p_tgt.x, p_tgt.y, p_tgt.z, 0);
- Vector4 n (p_tgt.normal_x, p_tgt.normal_y, p_tgt.normal_z, 0);
- return ((s - t).dot (n));
- }
+ TransformationEstimationPointToPlane(){};
+ ~TransformationEstimationPointToPlane(){};
- Scalar
- computeDistance (const Vector4 &p_src, const PointTarget &p_tgt) const override
- {
- // Compute the point-to-plane distance
- Vector4 t (p_tgt.x, p_tgt.y, p_tgt.z, 0);
- Vector4 n (p_tgt.normal_x, p_tgt.normal_y, p_tgt.normal_z, 0);
- return ((p_src - t).dot (n));
- }
+protected:
+ Scalar
+ computeDistance(const PointSource& p_src, const PointTarget& p_tgt) const override
+ {
+ // Compute the point-to-plane distance
+ Vector4 s(p_src.x, p_src.y, p_src.z, 0);
+ Vector4 t(p_tgt.x, p_tgt.y, p_tgt.z, 0);
+ Vector4 n(p_tgt.normal_x, p_tgt.normal_y, p_tgt.normal_z, 0);
+ return ((s - t).dot(n));
+ }
- };
+ Scalar
+ computeDistance(const Vector4& p_src, const PointTarget& p_tgt) const override
+ {
+ // Compute the point-to-plane distance
+ Vector4 t(p_tgt.x, p_tgt.y, p_tgt.z, 0);
+ Vector4 n(p_tgt.normal_x, p_tgt.normal_y, p_tgt.normal_z, 0);
+ return ((p_src - t).dot(n));
}
-}
+};
+} // namespace registration
+} // namespace pcl
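Because the point-to-plane metric reads normal_x/normal_y/normal_z from the target point type, the target cloud must carry normals (e.g. pcl::PointNormal). A small sketch follows (illustrative, not part of the diff; the helper name is hypothetical, and the correspondences overload is inherited from TransformationEstimationLM).

#include <pcl/correspondence.h>
#include <pcl/point_types.h>
#include <pcl/registration/transformation_estimation_point_to_plane.h>

// Hypothetical helper: LM-optimized point-to-plane estimate from given matches.
inline Eigen::Matrix4f
estimatePointToPlane(const pcl::PointCloud<pcl::PointXYZ>& src,
                     const pcl::PointCloud<pcl::PointNormal>& tgt_with_normals,
                     const pcl::Correspondences& correspondences)
{
  pcl::registration::TransformationEstimationPointToPlane<pcl::PointXYZ, pcl::PointNormal> est;
  Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();
  est.estimateRigidTransformation(src, tgt_with_normals, correspondences, transform);
  return transform;
}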
#include <pcl/registration/warp_point_rigid.h>
#include <pcl/cloud_iterator.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b TransformationEstimationPointToPlaneLLS implements a Linear Least Squares (LLS) approximation
- * for minimizing the point-to-plane distance between two clouds of corresponding points with normals.
- *
- * For additional details, see
- * "Linear Least-Squares Optimization for Point-to-Plane ICP Surface Registration", Kok-Lim Low, 2004
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the
- * transformation matrix (i.e., float or double). Default: float.
- * \author Michael Dixon
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimationPointToPlaneLLS : public TransformationEstimation<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar> >;
+namespace pcl {
+namespace registration {
+/** \brief @b TransformationEstimationPointToPlaneLLS implements a Linear Least Squares
+ * (LLS) approximation for minimizing the point-to-plane distance between two clouds of
+ * corresponding points with normals.
+ *
+ * For additional details, see
+ * "Linear Least-Squares Optimization for Point-to-Plane ICP Surface Registration",
+ * Kok-Lim Low, 2004
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Michael Dixon
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimationPointToPlaneLLS
+: public TransformationEstimation<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<
+ TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar>>;
+ using ConstPtr = shared_ptr<
+ const TransformationEstimationPointToPlaneLLS<PointSource, PointTarget, Scalar>>;
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
-
- TransformationEstimationPointToPlaneLLS () {};
- ~TransformationEstimationPointToPlaneLLS () {};
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
+ TransformationEstimationPointToPlaneLLS(){};
+ ~TransformationEstimationPointToPlaneLLS(){};
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const override;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const override;
- protected:
-
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] correspondences the vector of correspondences between source and target
+ * point cloud
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const override;
- /** \brief Construct a 4 by 4 transformation matrix from the provided rotation and translation.
- * \param[in] alpha the rotation about the x-axis
- * \param[in] beta the rotation about the y-axis
- * \param[in] gamma the rotation about the z-axis
- * \param[in] tx the x translation
- * \param[in] ty the y translation
- * \param[in] tz the z translation
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- constructTransformationMatrix (const double & alpha, const double & beta, const double & gamma,
- const double & tx, const double & ty, const double & tz,
- Matrix4 &transformation_matrix) const;
+protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * \param[in] source_it an iterator over the source point cloud dataset
+ * \param[in] target_it an iterator over the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const;
- };
- }
-}
+ /** \brief Construct a 4 by 4 transformation matrix from the provided rotation and
+ * translation.
+ * \param[in] alpha the rotation about the x-axis
+ * \param[in] beta the rotation about the y-axis
+ * \param[in] gamma the rotation about the z-axis
+ * \param[in] tx the x translation
+ * \param[in] ty the y translation
+ * \param[in] tz the z translation
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ constructTransformationMatrix(const double& alpha,
+ const double& beta,
+ const double& gamma,
+ const double& tx,
+ const double& ty,
+ const double& tz,
+ Matrix4& transformation_matrix) const;
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_point_to_plane_lls.hpp>
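As a closed-form alternative to the LM-based point-to-plane estimator, the LLS variant can be swapped into a registration pipeline. A hedged sketch (illustrative, not part of the diff; it assumes the setTransformationEstimation hook of pcl::Registration, and the function name is hypothetical):

#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/transformation_estimation_point_to_plane_lls.h>

inline void
usePointToPlaneLLS(pcl::IterativeClosestPoint<pcl::PointNormal, pcl::PointNormal>& icp)
{
  using EstimatorLLS = pcl::registration::
      TransformationEstimationPointToPlaneLLS<pcl::PointNormal, pcl::PointNormal>;

  // Replace the default point-to-point estimator with the linearized point-to-plane
  // solver; both clouds use pcl::PointNormal so the target normals are available.
  icp.setTransformationEstimation(EstimatorLLS::Ptr(new EstimatorLLS));
}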
#include <pcl/registration/warp_point_rigid.h>
#include <pcl/cloud_iterator.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b TransformationEstimationPointToPlaneLLSWeighted implements a Linear Least Squares (LLS) approximation
- * for minimizing the point-to-plane distance between two clouds of corresponding points with normals, with the
- * possibility of assigning weights to the correspondences.
- *
- * For additional details, see
- * "Linear Least-Squares Optimization for Point-to-Plane ICP Surface Registration", Kok-Lim Low, 2004
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the
- * transformation matrix (i.e., float or double). Default: float.
- * \author Alex Ichim
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimationPointToPlaneLLSWeighted : public TransformationEstimation<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimationPointToPlaneLLSWeighted<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationPointToPlaneLLSWeighted<PointSource, PointTarget, Scalar> >;
+namespace pcl {
+namespace registration {
+/** \brief @b TransformationEstimationPointToPlaneLLSWeighted implements a Linear Least
+ * Squares (LLS) approximation for minimizing the point-to-plane distance between two
+ * clouds of corresponding points with normals, with the possibility of assigning
+ * weights to the correspondences.
+ *
+ * For additional details, see
+ * "Linear Least-Squares Optimization for Point-to-Plane ICP Surface Registration",
+ * Kok-Lim Low, 2004
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Alex Ichim
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimationPointToPlaneLLSWeighted
+: public TransformationEstimation<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<TransformationEstimationPointToPlaneLLSWeighted<PointSource,
+ PointTarget,
+ Scalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationEstimationPointToPlaneLLSWeighted<PointSource,
+ PointTarget,
+ Scalar>>;
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
-
- TransformationEstimationPointToPlaneLLSWeighted () { };
- virtual ~TransformationEstimationPointToPlaneLLSWeighted () { };
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const;
+ TransformationEstimationPointToPlaneLLSWeighted(){};
+ virtual ~TransformationEstimationPointToPlaneLLSWeighted(){};
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const;
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const;
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD.
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] correspondences the vector of correspondences between source and target
+ * point cloud
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const;
- /** \brief Set the weights for the correspondences.
- * \param[in] weights the weights for each correspondence
- */
- inline void
- setCorrespondenceWeights (const std::vector<Scalar> &weights)
- { weights_ = weights; }
+ /** \brief Set the weights for the correspondences.
+ * \param[in] weights the weights for each correspondence
+ */
+ inline void
+ setCorrespondenceWeights(const std::vector<Scalar>& weights)
+ {
+ weights_ = weights;
+ }
- protected:
-
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param weights_it
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- typename std::vector<Scalar>::const_iterator& weights_it,
- Matrix4 &transformation_matrix) const;
+protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * \param[in] source_it an iterator over the source point cloud dataset
+ * \param[in] target_it an iterator over the target point cloud dataset
+ * \param weights_it
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ typename std::vector<Scalar>::const_iterator& weights_it,
+ Matrix4& transformation_matrix) const;
- /** \brief Construct a 4 by 4 transformation matrix from the provided rotation and translation.
- * \param[in] alpha the rotation about the x-axis
- * \param[in] beta the rotation about the y-axis
- * \param[in] gamma the rotation about the z-axis
- * \param[in] tx the x translation
- * \param[in] ty the y translation
- * \param[in] tz the z translation
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- constructTransformationMatrix (const double & alpha, const double & beta, const double & gamma,
- const double & tx, const double & ty, const double & tz,
- Matrix4 &transformation_matrix) const;
+ /** \brief Construct a 4 by 4 transformation matrix from the provided rotation and
+ * translation.
+ * \param[in] alpha the rotation about the x-axis
+ * \param[in] beta the rotation about the y-axis
+ * \param[in] gamma the rotation about the z-axis
+ * \param[in] tx the x translation
+ * \param[in] ty the y translation
+ * \param[in] tz the z translation
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ constructTransformationMatrix(const double& alpha,
+ const double& beta,
+ const double& gamma,
+ const double& tx,
+ const double& ty,
+ const double& tz,
+ Matrix4& transformation_matrix) const;
- std::vector<Scalar> weights_;
- };
- }
-}
+ std::vector<Scalar> weights_;
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_point_to_plane_lls_weighted.hpp>
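The weighted LLS estimator additionally takes one weight per correspondence via setCorrespondenceWeights. A minimal sketch (illustrative, not part of the diff; the helper name is hypothetical and uniform weights are used purely as a placeholder):

#include <vector>

#include <pcl/correspondence.h>
#include <pcl/point_types.h>
#include <pcl/registration/transformation_estimation_point_to_plane_lls_weighted.h>

inline Eigen::Matrix4f
estimateWeightedPointToPlane(const pcl::PointCloud<pcl::PointXYZ>& src,
                             const pcl::PointCloud<pcl::PointNormal>& tgt,
                             const pcl::Correspondences& correspondences)
{
  pcl::registration::TransformationEstimationPointToPlaneLLSWeighted<pcl::PointXYZ,
                                                                     pcl::PointNormal>
      est;

  // One weight per correspondence (Scalar defaults to float); uniform here, but
  // distance- or curvature-based weights could be supplied instead.
  est.setCorrespondenceWeights(std::vector<float>(correspondences.size(), 1.0f));

  Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();
  est.estimateRigidTransformation(src, tgt, correspondences, transform);
  return transform;
}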
#pragma once
-#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
+#include <pcl/registration/distances.h>
#include <pcl/registration/transformation_estimation_point_to_plane.h>
#include <pcl/registration/warp_point_rigid.h>
-#include <pcl/registration/distances.h>
+#include <pcl/memory.h>
+#include <pcl/pcl_macros.h>
-namespace pcl
-{
- namespace registration
+namespace pcl {
+namespace registration {
+/** @b TransformationEstimationPointToPlaneWeighted uses Levenberg Marquardt
+ * optimization to find the transformation that minimizes the point-to-plane distance
+ * between the given correspondences. In addition to the
+ * TransformationEstimationPointToPlane class, this version takes per-correspondence
+ * weights and optimizes accordingly.
+ *
+ * \author Alexandru-Eugen Ichim
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename MatScalar = float>
+class TransformationEstimationPointToPlaneWeighted
+: public TransformationEstimationPointToPlane<PointSource, PointTarget, MatScalar> {
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
+public:
+ using Ptr = shared_ptr<TransformationEstimationPointToPlaneWeighted<PointSource,
+ PointTarget,
+ MatScalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationEstimationPointToPlaneWeighted<PointSource,
+ PointTarget,
+ MatScalar>>;
+
+ using VectorX = Eigen::Matrix<MatScalar, Eigen::Dynamic, 1>;
+ using Vector4 = Eigen::Matrix<MatScalar, 4, 1>;
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, MatScalar>::Matrix4;
+
+ /** \brief Constructor. */
+ TransformationEstimationPointToPlaneWeighted();
+
+ /** \brief Copy constructor.
+ * \param[in] src the TransformationEstimationPointToPlaneWeighted object to copy into
+ * this
+ */
+ TransformationEstimationPointToPlaneWeighted(
+ const TransformationEstimationPointToPlaneWeighted& src)
+ : tmp_src_(src.tmp_src_)
+ , tmp_tgt_(src.tmp_tgt_)
+ , tmp_idx_src_(src.tmp_idx_src_)
+ , tmp_idx_tgt_(src.tmp_idx_tgt_)
+ , warp_point_(src.warp_point_)
+ , correspondence_weights_(src.correspondence_weights_)
+ , use_correspondence_weights_(src.use_correspondence_weights_){};
+
+ /** \brief Copy operator.
+ * \param[in] src the TransformationEstimationPointToPlaneWeighted object to copy into
+ * this
+ */
+ TransformationEstimationPointToPlaneWeighted&
+ operator=(const TransformationEstimationPointToPlaneWeighted& src)
{
- /** @b TransformationEstimationPointToPlaneWeighted uses Levenberg Marquardt optimization to find the transformation
- * that minimizes the point-to-plane distance between the given correspondences. In addition to the
- * TransformationEstimationPointToPlane class, this version takes per-correspondence weights and optimizes accordingly.
- *
- * \author Alexandru-Eugen Ichim
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename MatScalar = float>
- class TransformationEstimationPointToPlaneWeighted : public TransformationEstimationPointToPlane<PointSource, PointTarget, MatScalar>
- {
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
-
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- public:
- using Ptr = shared_ptr<TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar> >;
-
- using VectorX = Eigen::Matrix<MatScalar, Eigen::Dynamic, 1>;
- using Vector4 = Eigen::Matrix<MatScalar, 4, 1>;
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, MatScalar>::Matrix4;
-
- /** \brief Constructor. */
- TransformationEstimationPointToPlaneWeighted ();
-
- /** \brief Copy constructor.
- * \param[in] src the TransformationEstimationPointToPlaneWeighted object to copy into this
- */
- TransformationEstimationPointToPlaneWeighted (const TransformationEstimationPointToPlaneWeighted &src) :
- tmp_src_ (src.tmp_src_),
- tmp_tgt_ (src.tmp_tgt_),
- tmp_idx_src_ (src.tmp_idx_src_),
- tmp_idx_tgt_ (src.tmp_idx_tgt_),
- warp_point_ (src.warp_point_),
- correspondence_weights_ (src.correspondence_weights_),
- use_correspondence_weights_ (src.use_correspondence_weights_)
- {};
-
- /** \brief Copy operator.
- * \param[in] src the TransformationEstimationPointToPlaneWeighted object to copy into this
- */
- TransformationEstimationPointToPlaneWeighted&
- operator = (const TransformationEstimationPointToPlaneWeighted &src)
- {
- tmp_src_ = src.tmp_src_;
- tmp_tgt_ = src.tmp_tgt_;
- tmp_idx_src_ = src.tmp_idx_src_;
- tmp_idx_tgt_ = src.tmp_idx_tgt_;
- warp_point_ = src.warp_point_;
- correspondence_weights_ = src.correspondence_weights_;
- use_correspondence_weights_ = src.use_correspondence_weights_;
- }
-
- /** \brief Destructor. */
- virtual ~TransformationEstimationPointToPlaneWeighted () {};
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using LM.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- * \note Uses the weights given by setWeights.
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using LM.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- * \note Uses the weights given by setWeights.
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using LM.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from
- * \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- * \note Uses the weights given by setWeights.
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using LM.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- * \note Uses the weights given by setWeights.
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const;
-
-
- inline void
- setWeights (const std::vector<double> &weights)
- { correspondence_weights_ = weights; }
-
- /// use the weights given in the pcl::CorrespondencesPtr for one of the estimateTransformation (...) methods
- inline void
- setUseCorrespondenceWeights (bool use_correspondence_weights)
- { use_correspondence_weights_ = use_correspondence_weights; }
-
- /** \brief Set the function we use to warp points. Defaults to rigid 6D warp.
- * \param[in] warp_fcn a shared pointer to an object that warps points
- */
- void
- setWarpFunction (const typename WarpPointRigid<PointSource, PointTarget, MatScalar>::Ptr &warp_fcn)
- { warp_point_ = warp_fcn; }
-
- protected:
- bool use_correspondence_weights_;
- mutable std::vector<double> correspondence_weights_;
-
- /** \brief Temporary pointer to the source dataset. */
- mutable const PointCloudSource *tmp_src_;
-
- /** \brief Temporary pointer to the target dataset. */
- mutable const PointCloudTarget *tmp_tgt_;
-
- /** \brief Temporary pointer to the source dataset indices. */
- mutable const std::vector<int> *tmp_idx_src_;
-
- /** \brief Temporary pointer to the target dataset indices. */
- mutable const std::vector<int> *tmp_idx_tgt_;
-
- /** \brief The parameterized function used to warp the source to the target. */
- typename pcl::registration::WarpPointRigid<PointSource, PointTarget, MatScalar>::Ptr warp_point_;
-
- /** Base functor all the models that need non linear optimization must
- * define their own one and implement operator() (const Eigen::VectorXd& x, Eigen::VectorXd& fvec)
- * or operator() (const Eigen::VectorXf& x, Eigen::VectorXf& fvec) depending on the chosen _Scalar
- */
- template<typename _Scalar, int NX=Eigen::Dynamic, int NY=Eigen::Dynamic>
- struct Functor
- {
- using Scalar = _Scalar;
- enum
- {
- InputsAtCompileTime = NX,
- ValuesAtCompileTime = NY
- };
- using InputType = Eigen::Matrix<_Scalar,InputsAtCompileTime,1>;
- using ValueType = Eigen::Matrix<_Scalar,ValuesAtCompileTime,1>;
- using JacobianType = Eigen::Matrix<_Scalar,ValuesAtCompileTime,InputsAtCompileTime>;
-
- /** \brief Empty Constructor. */
- Functor () : m_data_points_ (ValuesAtCompileTime) {}
-
- /** \brief Constructor
- * \param[in] m_data_points number of data points to evaluate.
- */
- Functor (int m_data_points) : m_data_points_ (m_data_points) {}
-
- /** \brief Destructor. */
- virtual ~Functor () {}
-
- /** \brief Get the number of values. */
- int
- values () const { return (m_data_points_); }
-
- protected:
- int m_data_points_;
- };
-
- struct OptimizationFunctor : public Functor<MatScalar>
- {
- using Functor<MatScalar>::values;
-
- /** Functor constructor
- * \param[in] m_data_points the number of data points to evaluate
- * \param[in,out] estimator pointer to the estimator object
- */
- OptimizationFunctor (int m_data_points,
- const TransformationEstimationPointToPlaneWeighted *estimator)
- : Functor<MatScalar> (m_data_points), estimator_ (estimator)
- {}
-
- /** Copy constructor
- * \param[in] src the optimization functor to copy into this
- */
- inline OptimizationFunctor (const OptimizationFunctor &src) :
- Functor<MatScalar> (src.m_data_points_), estimator_ ()
- {
- *this = src;
- }
-
- /** Copy operator
- * \param[in] src the optimization functor to copy into this
- */
- inline OptimizationFunctor&
- operator = (const OptimizationFunctor &src)
- {
- Functor<MatScalar>::operator=(src);
- estimator_ = src.estimator_;
- return (*this);
- }
-
- /** \brief Destructor. */
- virtual ~OptimizationFunctor () {}
-
- /** Fill fvec from x. For the current state vector x fill the f values
- * \param[in] x state vector
- * \param[out] fvec f values vector
- */
- int
- operator () (const VectorX &x, VectorX &fvec) const;
-
- const TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar> *estimator_;
- };
-
- struct OptimizationFunctorWithIndices : public Functor<MatScalar>
- {
- using Functor<MatScalar>::values;
-
- /** Functor constructor
- * \param[in] m_data_points the number of data points to evaluate
- * \param[in,out] estimator pointer to the estimator object
- */
- OptimizationFunctorWithIndices (int m_data_points,
- const TransformationEstimationPointToPlaneWeighted *estimator)
- : Functor<MatScalar> (m_data_points), estimator_ (estimator)
- {}
-
- /** Copy constructor
- * \param[in] src the optimization functor to copy into this
- */
- inline OptimizationFunctorWithIndices (const OptimizationFunctorWithIndices &src)
- : Functor<MatScalar> (src.m_data_points_), estimator_ ()
- {
- *this = src;
- }
-
- /** Copy operator
- * \param[in] src the optimization functor to copy into this
- */
- inline OptimizationFunctorWithIndices&
- operator = (const OptimizationFunctorWithIndices &src)
- {
- Functor<MatScalar>::operator=(src);
- estimator_ = src.estimator_;
- return (*this);
- }
-
- /** \brief Destructor. */
- virtual ~OptimizationFunctorWithIndices () {}
-
- /** Fill fvec from x. For the current state vector x fill the f values
- * \param[in] x state vector
- * \param[out] fvec f values vector
- */
- int
- operator () (const VectorX &x, VectorX &fvec) const;
-
- const TransformationEstimationPointToPlaneWeighted<PointSource, PointTarget, MatScalar> *estimator_;
- };
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ tmp_src_ = src.tmp_src_;
+ tmp_tgt_ = src.tmp_tgt_;
+ tmp_idx_src_ = src.tmp_idx_src_;
+ tmp_idx_tgt_ = src.tmp_idx_tgt_;
+ warp_point_ = src.warp_point_;
+ correspondence_weights_ = src.correspondence_weights_;
+ use_correspondence_weights_ = src.use_correspondence_weights_;
+ return (*this);
+ }
+
+ /** \brief Destructor. */
+ virtual ~TransformationEstimationPointToPlaneWeighted(){};
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LM. \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ * \note Uses the weights given by setWeights.
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LM. \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ * \note Uses the weights given by setWeights.
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LM. \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ * \note Uses the weights given by setWeights.
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LM. \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] correspondences the vector of correspondences between source and target
+ * point cloud \param[out] transformation_matrix the resultant transformation matrix
+ * \note Uses the weights given by setWeights.
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const;
+
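+ /** \brief Set the weights used by the estimateRigidTransformation () methods, one
+ * weight per correspondence/point pair. */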
+ inline void
+ setWeights(const std::vector<double>& weights)
+ {
+ correspondence_weights_ = weights;
}
-}
+
+ /// Use the weights given in the pcl::Correspondences for one of the
+ /// estimateRigidTransformation (...) methods
+ inline void
+ setUseCorrespondenceWeights(bool use_correspondence_weights)
+ {
+ use_correspondence_weights_ = use_correspondence_weights;
+ }
+
+ /** \brief Set the function we use to warp points. Defaults to rigid 6D warp.
+ * \param[in] warp_fcn a shared pointer to an object that warps points
+ */
+ void
+ setWarpFunction(
+ const typename WarpPointRigid<PointSource, PointTarget, MatScalar>::Ptr& warp_fcn)
+ {
+ warp_point_ = warp_fcn;
+ }
+
+protected:
+ bool use_correspondence_weights_;
+ mutable std::vector<double> correspondence_weights_;
+
+ /** \brief Temporary pointer to the source dataset. */
+ mutable const PointCloudSource* tmp_src_;
+
+ /** \brief Temporary pointer to the target dataset. */
+ mutable const PointCloudTarget* tmp_tgt_;
+
+ /** \brief Temporary pointer to the source dataset indices. */
+ mutable const pcl::Indices* tmp_idx_src_;
+
+ /** \brief Temporary pointer to the target dataset indices. */
+ mutable const pcl::Indices* tmp_idx_tgt_;
+
+ /** \brief The parameterized function used to warp the source to the target. */
+ typename pcl::registration::WarpPointRigid<PointSource, PointTarget, MatScalar>::Ptr
+ warp_point_;
+
+ /** Base functor; all models that need non-linear optimization must
+ * define their own functor and implement operator() (const Eigen::VectorXd& x,
+ * Eigen::VectorXd& fvec) or operator() (const Eigen::VectorXf& x, Eigen::VectorXf&
+ * fvec) depending on the chosen _Scalar
+ */
+ template <typename _Scalar, int NX = Eigen::Dynamic, int NY = Eigen::Dynamic>
+ struct Functor {
+ using Scalar = _Scalar;
+ enum { InputsAtCompileTime = NX, ValuesAtCompileTime = NY };
+ using InputType = Eigen::Matrix<_Scalar, InputsAtCompileTime, 1>;
+ using ValueType = Eigen::Matrix<_Scalar, ValuesAtCompileTime, 1>;
+ using JacobianType =
+ Eigen::Matrix<_Scalar, ValuesAtCompileTime, InputsAtCompileTime>;
+
+ /** \brief Empty Constructor. */
+ Functor() : m_data_points_(ValuesAtCompileTime) {}
+
+ /** \brief Constructor
+ * \param[in] m_data_points number of data points to evaluate.
+ */
+ Functor(int m_data_points) : m_data_points_(m_data_points) {}
+
+ /** \brief Destructor. */
+ virtual ~Functor() {}
+
+ /** \brief Get the number of values. */
+ int
+ values() const
+ {
+ return (m_data_points_);
+ }
+
+ protected:
+ int m_data_points_;
+ };
+
+ struct OptimizationFunctor : public Functor<MatScalar> {
+ using Functor<MatScalar>::values;
+
+ /** Functor constructor
+ * \param[in] m_data_points the number of data points to evaluate
+ * \param[in,out] estimator pointer to the estimator object
+ */
+ OptimizationFunctor(int m_data_points,
+ const TransformationEstimationPointToPlaneWeighted* estimator)
+ : Functor<MatScalar>(m_data_points), estimator_(estimator)
+ {}
+
+ /** Copy constructor
+ * \param[in] src the optimization functor to copy into this
+ */
+ inline OptimizationFunctor(const OptimizationFunctor& src)
+ : Functor<MatScalar>(src.m_data_points_), estimator_()
+ {
+ *this = src;
+ }
+
+ /** Copy operator
+ * \param[in] src the optimization functor to copy into this
+ */
+ inline OptimizationFunctor&
+ operator=(const OptimizationFunctor& src)
+ {
+ Functor<MatScalar>::operator=(src);
+ estimator_ = src.estimator_;
+ return (*this);
+ }
+
+ /** \brief Destructor. */
+ virtual ~OptimizationFunctor() {}
+
+ /** Fill fvec from x. For the current state vector x fill the f values
+ * \param[in] x state vector
+ * \param[out] fvec f values vector
+ */
+ int
+ operator()(const VectorX& x, VectorX& fvec) const;
+
+ const TransformationEstimationPointToPlaneWeighted<PointSource,
+ PointTarget,
+ MatScalar>* estimator_;
+ };
+
+ struct OptimizationFunctorWithIndices : public Functor<MatScalar> {
+ using Functor<MatScalar>::values;
+
+ /** Functor constructor
+ * \param[in] m_data_points the number of data points to evaluate
+ * \param[in,out] estimator pointer to the estimator object
+ */
+ OptimizationFunctorWithIndices(
+ int m_data_points,
+ const TransformationEstimationPointToPlaneWeighted* estimator)
+ : Functor<MatScalar>(m_data_points), estimator_(estimator)
+ {}
+
+ /** Copy constructor
+ * \param[in] src the optimization functor to copy into this
+ */
+ inline OptimizationFunctorWithIndices(const OptimizationFunctorWithIndices& src)
+ : Functor<MatScalar>(src.m_data_points_), estimator_()
+ {
+ *this = src;
+ }
+
+ /** Copy operator
+ * \param[in] src the optimization functor to copy into this
+ */
+ inline OptimizationFunctorWithIndices&
+ operator=(const OptimizationFunctorWithIndices& src)
+ {
+ Functor<MatScalar>::operator=(src);
+ estimator_ = src.estimator_;
+ return (*this);
+ }
+
+ /** \brief Destructor. */
+ virtual ~OptimizationFunctorWithIndices() {}
+
+ /** Fill fvec from x. For the current state vector x fill the f values
+ * \param[in] x state vector
+ * \param[out] fvec f values vector
+ */
+ int
+ operator()(const VectorX& x, VectorX& fvec) const;
+
+ const TransformationEstimationPointToPlaneWeighted<PointSource,
+ PointTarget,
+ MatScalar>* estimator_;
+ };
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_point_to_plane_weighted.hpp>
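As a quick illustration of the weighted point-to-plane estimator declared above, here is a minimal sketch; the helper name, clouds, and weights are hypothetical, and the public header path is assumed to mirror the impl include.

#include <vector>
#include <pcl/point_types.h>
#include <pcl/correspondence.h>
#include <pcl/registration/transformation_estimation_point_to_plane_weighted.h> // assumed path

// Hypothetical helper: the target cloud must carry normals for the point-to-plane metric.
Eigen::Matrix4f
alignWeighted (const pcl::PointCloud<pcl::PointNormal>& source,
               const pcl::PointCloud<pcl::PointNormal>& target,
               const pcl::Correspondences& correspondences,
               const std::vector<double>& weights) // one weight per correspondence
{
  pcl::registration::TransformationEstimationPointToPlaneWeighted<pcl::PointNormal,
                                                                  pcl::PointNormal> est;
  est.setWeights (weights); // per-correspondence weights (see also setUseCorrespondenceWeights)
  Eigen::Matrix4f transform;
  est.estimateRigidTransformation (source, target, correspondences, transform);
  return (transform);
}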
#include <pcl/registration/transformation_estimation.h>
#include <pcl/cloud_iterator.h>
-namespace pcl
-{
- namespace registration
- {
- /** @b TransformationEstimationSVD implements SVD-based estimation of
- * the transformation aligning the given correspondences.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Dirk Holz, Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimationSVD : public TransformationEstimation<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimationSVD<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationSVD<PointSource, PointTarget, Scalar> >;
-
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
-
- /** \brief Constructor
- * \param[in] use_umeyama Toggles whether or not to use 3rd party software*/
- TransformationEstimationSVD (bool use_umeyama=true):
- use_umeyama_ (use_umeyama)
- {}
-
- ~TransformationEstimationSVD () {};
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const override;
-
- protected:
-
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const;
-
- /** \brief Obtain a 4x4 rigid transformation matrix from a correlation matrix H = src * tgt'
- * \param[in] cloud_src_demean the input source cloud, demeaned, in Eigen format
- * \param[in] centroid_src the input source centroid, in Eigen format
- * \param[in] cloud_tgt_demean the input target cloud, demeaned, in Eigen format
- * \param[in] centroid_tgt the input target cloud, in Eigen format
- * \param[out] transformation_matrix the resultant 4x4 rigid transformation matrix
- */
- virtual void
- getTransformationFromCorrelation (
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_src_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_src,
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_tgt_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_tgt,
- Matrix4 &transformation_matrix) const;
-
- bool use_umeyama_;
- };
-
- }
-}
+namespace pcl {
+namespace registration {
+/** @b TransformationEstimationSVD implements SVD-based estimation of
+ * the transformation aligning the given correspondences.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Dirk Holz, Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimationSVD
+: public TransformationEstimation<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<TransformationEstimationSVD<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationEstimationSVD<PointSource, PointTarget, Scalar>>;
+
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
+
+ /** \brief Constructor
+ * \param[in] use_umeyama Toggles whether or not to use the 3rd party Umeyama method */
+ TransformationEstimationSVD(bool use_umeyama = true) : use_umeyama_(use_umeyama) {}
+
+ ~TransformationEstimationSVD(){};
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD. \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD. \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD. \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using SVD. \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] correspondences the vector of correspondences between source and target
+ * point cloud \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const override;
+
+protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * \param[in] source_it an iterator over the source point cloud dataset
+ * \param[in] target_it an iterator over the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const;
+
+ /** \brief Obtain a 4x4 rigid transformation matrix from a correlation matrix H = src
+ * * tgt' \param[in] cloud_src_demean the input source cloud, demeaned, in Eigen
+ * format \param[in] centroid_src the input source centroid, in Eigen format
+ * \param[in] cloud_tgt_demean the input target cloud, demeaned, in Eigen format
+ * \param[in] centroid_tgt the input target centroid, in Eigen format
+ * \param[out] transformation_matrix the resultant 4x4 rigid transformation matrix
+ */
+ virtual void
+ getTransformationFromCorrelation(
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_src_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_src,
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_tgt_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_tgt,
+ Matrix4& transformation_matrix) const;
+
+ bool use_umeyama_;
+};
+
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_svd.hpp>
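A minimal sketch of the SVD estimator declared above, assuming correspondences have already been established; the helper name and clouds are hypothetical.

#include <pcl/point_types.h>
#include <pcl/correspondence.h>
#include <pcl/registration/transformation_estimation_svd.h>

// Hypothetical helper: estimate a rigid transform from already-established correspondences.
Eigen::Matrix4f
alignWithSVD (const pcl::PointCloud<pcl::PointXYZ>& source,
              const pcl::PointCloud<pcl::PointXYZ>& target,
              const pcl::Correspondences& correspondences)
{
  pcl::registration::TransformationEstimationSVD<pcl::PointXYZ, pcl::PointXYZ> svd;
  Eigen::Matrix4f transform;
  svd.estimateRigidTransformation (source, target, correspondences, transform);
  return (transform);
}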
#include <pcl/registration/transformation_estimation_svd.h>
-namespace pcl
-{
- namespace registration
- {
- /** @b TransformationEstimationSVD implements SVD-based estimation of
- * the transformation aligning the given correspondences.
- * Optionally the scale is estimated. Note that the similarity transform might not be optimal for the underlying Frobenius Norm.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Suat Gedikli
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimationSVDScale : public TransformationEstimationSVD<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimationSVDScale<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationSVDScale<PointSource, PointTarget, Scalar> >;
+namespace pcl {
+namespace registration {
+/** @b TransformationEstimationSVDScale implements SVD-based estimation of
+ * the transformation aligning the given correspondences.
+ * Optionally the scale is estimated. Note that the similarity transform might not be
+ * optimal for the underlying Frobenius Norm.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Suat Gedikli
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimationSVDScale
+: public TransformationEstimationSVD<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr =
+ shared_ptr<TransformationEstimationSVDScale<PointSource, PointTarget, Scalar>>;
+ using ConstPtr = shared_ptr<
+ const TransformationEstimationSVDScale<PointSource, PointTarget, Scalar>>;
- using Matrix4 = typename TransformationEstimationSVD<PointSource, PointTarget, Scalar>::Matrix4;
+ using Matrix4 =
+ typename TransformationEstimationSVD<PointSource, PointTarget, Scalar>::Matrix4;
- /** \brief Inherits from TransformationEstimationSVD, but forces it to not use the Umeyama method */
- TransformationEstimationSVDScale ():
- TransformationEstimationSVD<PointSource, PointTarget, Scalar> (false)
- {}
+ /** \brief Inherits from TransformationEstimationSVD, but forces it to not use the
+ * Umeyama method */
+ TransformationEstimationSVDScale()
+ : TransformationEstimationSVD<PointSource, PointTarget, Scalar>(false)
+ {}
- protected:
- /** \brief Obtain a 4x4 rigid transformation matrix from a correlation matrix H = src * tgt'
- * \param[in] cloud_src_demean the input source cloud, demeaned, in Eigen format
- * \param[in] centroid_src the input source centroid, in Eigen format
- * \param[in] cloud_tgt_demean the input target cloud, demeaned, in Eigen format
- * \param[in] centroid_tgt the input target cloud, in Eigen format
- * \param[out] transformation_matrix the resultant 4x4 rigid transformation matrix
- */
- void
- getTransformationFromCorrelation (const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_src_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_src,
- const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_tgt_demean,
- const Eigen::Matrix<Scalar, 4, 1> ¢roid_tgt,
- Matrix4 &transformation_matrix) const;
- };
+protected:
+ /** \brief Obtain a 4x4 rigid transformation matrix from a correlation matrix H = src
+ * * tgt' \param[in] cloud_src_demean the input source cloud, demeaned, in Eigen
+ * format \param[in] centroid_src the input source centroid, in Eigen format
+ * \param[in] cloud_tgt_demean the input target cloud, demeaned, in Eigen format
+ * \param[in] centroid_tgt the input target centroid, in Eigen format
+ * \param[out] transformation_matrix the resultant 4x4 rigid transformation matrix
+ */
+ void
+ getTransformationFromCorrelation(
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_src_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_src,
+ const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_tgt_demean,
+ const Eigen::Matrix<Scalar, 4, 1>& centroid_tgt,
+ Matrix4& transformation_matrix) const;
+};
- }
-}
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_svd_scale.hpp>
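Following the same pattern, a sketch for the scale-estimating variant declared above; the helper name and clouds are hypothetical and the public header path is assumed from the impl include.

#include <pcl/point_types.h>
#include <pcl/correspondence.h>
#include <pcl/registration/transformation_estimation_svd_scale.h> // assumed path

// Hypothetical helper: similarity (rotation + translation + uniform scale) estimate.
Eigen::Matrix4f
alignWithScale (const pcl::PointCloud<pcl::PointXYZ>& source,
                const pcl::PointCloud<pcl::PointXYZ>& target,
                const pcl::Correspondences& correspondences,
                float& scale)
{
  pcl::registration::TransformationEstimationSVDScale<pcl::PointXYZ, pcl::PointXYZ> est;
  Eigen::Matrix4f transform;
  est.estimateRigidTransformation (source, target, correspondences, transform);
  scale = transform.block<3, 1> (0, 0).norm (); // upper-left 3x3 block is scale * rotation
  return (transform);
}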
#pragma once
#include <pcl/registration/transformation_estimation.h>
-#include <pcl/registration/warp_point_rigid.h>
#include <pcl/cloud_iterator.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b TransformationEstimationSymmetricPointToPlaneLLS implements a Linear Least Squares (LLS) approximation
- * for minimizing the symmetric point-to-plane distance between two clouds of corresponding points with normals.
- *
- * For additional details, see
- * "Linear Least-Squares Optimization for Point-to-Plane ICP Surface Registration", Kok-Lim Low, 2004
- * "A Symmetric Objective Function for ICP", Szymon Rusinkiewicz, 2019
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the
- * transformation matrix (i.e., float or double). Default: float.
- * \author Matthew Cong
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationEstimationSymmetricPointToPlaneLLS : public TransformationEstimation<PointSource, PointTarget, Scalar>
- {
- public:
- using Ptr = shared_ptr<TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationEstimationSymmetricPointToPlaneLLS<PointSource, PointTarget, Scalar> >;
-
- using Matrix4 = typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
- using Vector6 = Eigen::Matrix<Scalar, 6, 1>;
-
- TransformationEstimationSymmetricPointToPlaneLLS () : enforce_same_direction_normals_ (true) {};
- ~TransformationEstimationSymmetricPointToPlaneLLS () {};
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target point cloud using SVD.
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation (
- const pcl::PointCloud<PointSource> &cloud_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt,
- const pcl::Correspondences &correspondences,
- Matrix4 &transformation_matrix) const override;
-
- /** \brief Set whether or not to negate source or target normals on a per-point basis such that they point in the same direction.
- * \param[in] enforce_same_direction_normals whether to negate source or target normals on a per-point basis such that they point in the same direction.
- */
- inline void
- setEnforceSameDirectionNormals (bool enforce_same_direction_normals);
-
- /** \brief Obtain whether source or target normals are negated on a per-point basis such that they point in the same direction or not */
- inline bool
- getEnforceSameDirectionNormals ();
-
- protected:
-
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation (ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4 &transformation_matrix) const;
-
- /** \brief Construct a 4 by 4 transformation matrix from the provided rotation and translation.
- * \param[in] parameters (alpha, beta, gamma, tx, ty, tz) specifying rotation about the x, y, and z-axis and translation along the the x, y, and z-axis respectively
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- constructTransformationMatrix (const Vector6 ¶meters,
- Matrix4 &transformation_matrix) const;
-
- /** \brief Whether or not to negate source and/or target normals such that they point in the same direction */
- bool enforce_same_direction_normals_;
-
- };
- }
-}
+namespace pcl {
+namespace registration {
+/** \brief @b TransformationEstimationSymmetricPointToPlaneLLS implements a Linear Least
+ * Squares (LLS) approximation for minimizing the symmetric point-to-plane distance
+ * between two clouds of corresponding points with normals.
+ *
+ * For additional details, see
+ * "Linear Least-Squares Optimization for Point-to-Plane ICP Surface Registration",
+ * Kok-Lim Low, 2004 "A Symmetric Objective Function for ICP", Szymon Rusinkiewicz, 2019
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Matthew Cong
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationEstimationSymmetricPointToPlaneLLS
+: public TransformationEstimation<PointSource, PointTarget, Scalar> {
+public:
+ using Ptr = shared_ptr<TransformationEstimationSymmetricPointToPlaneLLS<PointSource,
+ PointTarget,
+ Scalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationEstimationSymmetricPointToPlaneLLS<PointSource,
+ PointTarget,
+ Scalar>>;
+
+ using Matrix4 =
+ typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
+ using Vector6 = Eigen::Matrix<Scalar, 6, 1>;
+
+ TransformationEstimationSymmetricPointToPlaneLLS()
+ : enforce_same_direction_normals_(true){};
+ ~TransformationEstimationSymmetricPointToPlaneLLS(){};
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LLS. \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LLS. \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LLS. \param[in] cloud_src the source point cloud dataset
+ * \param[in] indices_src the vector of indices describing the points of interest in
+ * \a cloud_src
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the
+ * interest points from \a indices_src
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Indices& indices_tgt,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * point cloud using LLS. \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] correspondences the vector of correspondences between source and target
+ * point cloud \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt,
+ const pcl::Correspondences& correspondences,
+ Matrix4& transformation_matrix) const override;
+
+ /** \brief Set whether or not to negate source or target normals on a per-point basis
+ * such that they point in the same direction. \param[in]
+ * enforce_same_direction_normals whether to negate source or target normals on a
+ * per-point basis such that they point in the same direction.
+ */
+ inline void
+ setEnforceSameDirectionNormals(bool enforce_same_direction_normals);
+
+ /** \brief Obtain whether source or target normals are negated on a per-point basis
+ * such that they point in the same direction or not */
+ inline bool
+ getEnforceSameDirectionNormals();
+
+protected:
+ /** \brief Estimate a rigid rotation transformation between a source and a target
+ * \param[in] source_it an iterator over the source point cloud dataset
+ * \param[in] target_it an iterator over the target point cloud dataset
+ * \param[out] transformation_matrix the resultant transformation matrix
+ */
+ void
+ estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
+ ConstCloudIterator<PointTarget>& target_it,
+ Matrix4& transformation_matrix) const;
+
+ /** \brief Construct a 4 by 4 transformation matrix from the provided rotation and
+ * translation. \param[in] parameters (alpha, beta, gamma, tx, ty, tz) specifying
+ * rotation about the x, y, and z-axis and translation along the x, y, and z-axis
+ * respectively \param[out] transformation_matrix the resultant transformation matrix
+ */
+ inline void
+ constructTransformationMatrix(const Vector6& parameters,
+ Matrix4& transformation_matrix) const;
+
+ /** \brief Whether or not to negate source and/or target normals such that they point
+ * in the same direction */
+ bool enforce_same_direction_normals_;
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_estimation_symmetric_point_to_plane_lls.hpp>
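For the symmetric point-to-plane estimator declared above, a sketch along the same lines; the helper name and clouds are hypothetical, the public header path is assumed from the impl include, and both clouds must carry normals.

#include <pcl/point_types.h>
#include <pcl/correspondence.h>
#include <pcl/registration/transformation_estimation_symmetric_point_to_plane_lls.h> // assumed path

// Hypothetical helper: the symmetric objective uses normals from both clouds.
Eigen::Matrix4f
alignSymmetric (const pcl::PointCloud<pcl::PointNormal>& source,
                const pcl::PointCloud<pcl::PointNormal>& target,
                const pcl::Correspondences& correspondences)
{
  pcl::registration::TransformationEstimationSymmetricPointToPlaneLLS<pcl::PointNormal,
                                                                      pcl::PointNormal> est;
  est.setEnforceSameDirectionNormals (true); // negate mismatched normals per point (the default)
  Eigen::Matrix4f transform;
  est.estimateRigidTransformation (source, target, correspondences, transform);
  return (transform);
}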
#pragma once
-#include <pcl/correspondence.h>
-#include <pcl/features/feature.h>
#include <pcl/common/transforms.h>
+#include <pcl/features/feature.h>
#include <pcl/registration/correspondence_types.h>
+#include <pcl/correspondence.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief TransformationValidation represents the base class for methods
- * that validate the correctness of a transformation found through \ref TransformationEstimation.
- *
- * The inputs for a validation estimation can take any or all of the following:
- *
- * - source point cloud
- * - target point cloud
- * - estimated transformation between source and target
- *
- * The output is in the form of a score or a confidence measure.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationValidation
- {
- public:
- using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
- using Ptr = shared_ptr<TransformationValidation<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationValidation<PointSource, PointTarget, Scalar> >;
+namespace pcl {
+namespace registration {
+/** \brief TransformationValidation represents the base class for methods
+ * that validate the correctness of a transformation found through \ref
+ * TransformationEstimation.
+ *
+ * The inputs for a validation estimation can take any or all of the following:
+ *
+ * - source point cloud
+ * - target point cloud
+ * - estimated transformation between source and target
+ *
+ * The output is in the form of a score or a confidence measure.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationValidation {
+public:
+ using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
+ using Ptr = shared_ptr<TransformationValidation<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationValidation<PointSource, PointTarget, Scalar>>;
- using PointCloudSource = pcl::PointCloud<PointSource>;
- using PointCloudSourcePtr = typename PointCloudSource::Ptr;
- using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
+ using PointCloudSource = pcl::PointCloud<PointSource>;
+ using PointCloudSourcePtr = typename PointCloudSource::Ptr;
+ using PointCloudSourceConstPtr = typename PointCloudSource::ConstPtr;
- using PointCloudTarget = pcl::PointCloud<PointTarget>;
- using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
- using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
+ using PointCloudTarget = pcl::PointCloud<PointTarget>;
+ using PointCloudTargetPtr = typename PointCloudTarget::Ptr;
+ using PointCloudTargetConstPtr = typename PointCloudTarget::ConstPtr;
- TransformationValidation () {};
- virtual ~TransformationValidation () {};
+ TransformationValidation(){};
+ virtual ~TransformationValidation(){};
- /** \brief Validate the given transformation with respect to the input cloud data, and return a score. Pure virtual.
- *
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the transformation matrix
- *
- * \return the score or confidence measure for the given
- * transformation_matrix with respect to the input data
- */
- virtual double
- validateTransformation (
- const PointCloudSourceConstPtr &cloud_src,
- const PointCloudTargetConstPtr &cloud_tgt,
- const Matrix4 &transformation_matrix) const = 0;
+ /** \brief Validate the given transformation with respect to the input cloud data, and
+ * return a score. Pure virtual.
+ *
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] transformation_matrix the transformation matrix to validate
+ *
+ * \return the score or confidence measure for the given
+ * transformation_matrix with respect to the input data
+ */
+ virtual double
+ validateTransformation(const PointCloudSourceConstPtr& cloud_src,
+ const PointCloudTargetConstPtr& cloud_tgt,
+ const Matrix4& transformation_matrix) const = 0;
- /** \brief Comparator function for deciding which score is better after running the
- * validation on multiple transforms. Pure virtual.
- *
- * \note For example, for Euclidean distances smaller is better, for inliers the opposite.
- *
- * \param[in] score1 the first value
- * \param[in] score2 the second value
- *
- * \return true if score1 is better than score2
- */
- virtual bool
- operator() (const double &score1, const double &score2) const = 0;
+ /** \brief Comparator function for deciding which score is better after running the
+ * validation on multiple transforms. Pure virtual.
+ *
+ * \note For example, for Euclidean distances smaller is better, for inliers the
+ * opposite.
+ *
+ * \param[in] score1 the first value
+ * \param[in] score2 the second value
+ *
+ * \return true if score1 is better than score2
+ */
+ virtual bool
+ operator()(const double& score1, const double& score2) const = 0;
- /** \brief Check if the score is valid for a specific transformation. Pure virtual.
- *
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the transformation matrix
- *
- * \return true if the transformation is valid, false otherwise.
- */
- virtual bool
- isValid (
- const PointCloudSourceConstPtr &cloud_src,
- const PointCloudTargetConstPtr &cloud_tgt,
- const Matrix4 &transformation_matrix) const = 0;
- };
- }
-}
+ /** \brief Check if the score is valid for a specific transformation. Pure virtual.
+ *
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] transformation_matrix the transformation matrix to check
+ *
+ * \return true if the transformation is valid, false otherwise.
+ */
+ virtual bool
+ isValid(const PointCloudSourceConstPtr& cloud_src,
+ const PointCloudTargetConstPtr& cloud_tgt,
+ const Matrix4& transformation_matrix) const = 0;
+};
+} // namespace registration
+} // namespace pcl
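To illustrate the interface above, a minimal custom validator sketch; the class and its scoring rule are hypothetical (a real metric would compare the transformed source against the target), it only shows the three pure-virtual overrides.

#include <pcl/registration/transformation_validation.h>

// Hypothetical validator: scores a transform by the number of source points, purely to
// demonstrate the required overrides; not a meaningful registration metric.
template <typename PointSource, typename PointTarget>
class CountingValidation
: public pcl::registration::TransformationValidation<PointSource, PointTarget> {
  using Base = pcl::registration::TransformationValidation<PointSource, PointTarget>;

public:
  double
  validateTransformation (const typename Base::PointCloudSourceConstPtr& cloud_src,
                          const typename Base::PointCloudTargetConstPtr&,
                          const typename Base::Matrix4&) const override
  {
    return (static_cast<double> (cloud_src->size ()));
  }

  // Larger scores are considered better for this (hypothetical) metric
  bool
  operator() (const double& score1, const double& score2) const override
  {
    return (score1 > score2);
  }

  bool
  isValid (const typename Base::PointCloudSourceConstPtr& cloud_src,
           const typename Base::PointCloudTargetConstPtr& cloud_tgt,
           const typename Base::Matrix4& transformation) const override
  {
    return (validateTransformation (cloud_src, cloud_tgt, transformation) > 0.0);
  }
};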
#pragma once
+#include <pcl/kdtree/kdtree.h>
+#include <pcl/registration/transformation_validation.h>
+#include <pcl/search/kdtree.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_representation.h>
-#include <pcl/search/kdtree.h>
-#include <pcl/kdtree/kdtree.h>
-#include <pcl/registration/transformation_validation.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief TransformationValidationEuclidean computes an L2SQR norm between a source and target
- * dataset.
- *
- * To prevent points with bad correspondences to contribute to the overall score, the class also
- * accepts a maximum_range parameter given via \ref setMaxRange that is used as a cutoff value for
- * nearest neighbor distance comparisons.
- *
- * The output score is normalized with respect to the number of valid correspondences found.
- *
- * Usage example:
- * \code
- * pcl::TransformationValidationEuclidean<pcl::PointXYZ, pcl::PointXYZ> tve;
- * tve.setMaxRange (0.01); // 1cm
- * double score = tve.validateTransformation (source, target, transformation);
- * \endcode
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSource, typename PointTarget, typename Scalar = float>
- class TransformationValidationEuclidean
- {
- public:
- using Matrix4 = typename TransformationValidation<PointSource, PointTarget, Scalar>::Matrix4;
-
- using Ptr = shared_ptr<TransformationValidation<PointSource, PointTarget, Scalar> >;
- using ConstPtr = shared_ptr<const TransformationValidation<PointSource, PointTarget, Scalar> >;
+namespace pcl {
+namespace registration {
+/** \brief TransformationValidationEuclidean computes an L2SQR norm between a source and
+ * target dataset.
+ *
+ * To prevent points with bad correspondences from contributing to the overall score,
+ * the class also accepts a max_range parameter, given via \ref setMaxRange, that is
+ * used as a cutoff value for nearest neighbor distance comparisons.
+ *
+ * The output score is normalized with respect to the number of valid correspondences
+ * found.
+ *
+ * Usage example:
+ * \code
+ * pcl::TransformationValidationEuclidean<pcl::PointXYZ, pcl::PointXYZ> tve;
+ * tve.setMaxRange (0.01); // 1cm
+ * double score = tve.validateTransformation (source, target, transformation);
+ * \endcode
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSource, typename PointTarget, typename Scalar = float>
+class TransformationValidationEuclidean {
+public:
+ using Matrix4 =
+ typename TransformationValidation<PointSource, PointTarget, Scalar>::Matrix4;
+
+ using Ptr = shared_ptr<TransformationValidation<PointSource, PointTarget, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const TransformationValidation<PointSource, PointTarget, Scalar>>;
- using KdTree = pcl::search::KdTree<PointTarget>;
- using KdTreePtr = typename KdTree::Ptr;
+ using KdTree = pcl::search::KdTree<PointTarget>;
+ using KdTreePtr = typename KdTree::Ptr;
- using PointRepresentationConstPtr = typename KdTree::PointRepresentationConstPtr;
+ using PointRepresentationConstPtr = typename KdTree::PointRepresentationConstPtr;
- using PointCloudSourceConstPtr = typename TransformationValidation<PointSource, PointTarget>::PointCloudSourceConstPtr;
- using PointCloudTargetConstPtr = typename TransformationValidation<PointSource, PointTarget>::PointCloudTargetConstPtr;
+ using PointCloudSourceConstPtr =
+ typename TransformationValidation<PointSource,
+ PointTarget>::PointCloudSourceConstPtr;
+ using PointCloudTargetConstPtr =
+ typename TransformationValidation<PointSource,
+ PointTarget>::PointCloudTargetConstPtr;
- /** \brief Constructor.
- * Sets the \a max_range parameter to double::max, \a threshold_ to NaN
- * and initializes the internal search \a tree to a FLANN kd-tree.
- */
- TransformationValidationEuclidean () :
- max_range_ (std::numeric_limits<double>::max ()),
- threshold_ (std::numeric_limits<double>::quiet_NaN ()),
- tree_ (new pcl::search::KdTree<PointTarget>),
- force_no_recompute_ (false)
- {
- }
+ /** \brief Constructor.
+ * Sets the \a max_range parameter to double::max, \a threshold_ to NaN
+ * and initializes the internal search \a tree to a FLANN kd-tree.
+ */
+ TransformationValidationEuclidean()
+ : max_range_(std::numeric_limits<double>::max())
+ , threshold_(std::numeric_limits<double>::quiet_NaN())
+ , tree_(new pcl::search::KdTree<PointTarget>)
+ , force_no_recompute_(false)
+ {}
- virtual ~TransformationValidationEuclidean () {};
+ virtual ~TransformationValidationEuclidean(){};
+
+ /** \brief Set the maximum allowable distance between a point and its correspondence
+ * in the target in order for a correspondence to be considered \a valid. Default:
+ * double::max.
+ * \param[in] max_range the new maximum allowable distance
+ */
+ inline void
+ setMaxRange(double max_range)
+ {
+ max_range_ = max_range;
+ }
- /** \brief Set the maximum allowable distance between a point and its correspondence in the
- * target in order for a correspondence to be considered \a valid. Default: double::max.
- * \param[in] max_range the new maximum allowable distance
- */
- inline void
- setMaxRange (double max_range)
- {
- max_range_ = max_range;
- }
+ /** \brief Get the maximum allowable distance between a point and its
+ * correspondence, as set by the user.
+ */
+ inline double
+ getMaxRange()
+ {
+ return (max_range_);
+ }
- /** \brief Get the maximum allowable distance between a point and its
- * correspondence, as set by the user.
- */
- inline double
- getMaxRange ()
- {
- return (max_range_);
- }
+ /** \brief Provide a pointer to the search object used to find correspondences in
+ * the target cloud.
+ * \param[in] tree a pointer to the spatial search object.
+ * \param[in] force_no_recompute If set to true, this tree will NEVER be
+ * recomputed, regardless of calls to setInputTarget. Only use if you are
+ * confident that the tree will be set correctly.
+ */
+ inline void
+ setSearchMethodTarget(const KdTreePtr& tree, bool force_no_recompute = false)
+ {
+ tree_ = tree;
+ force_no_recompute_ = force_no_recompute;
+ }
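+ // A hedged sketch of supplying a pre-built tree to an instance "tve" as in the class
+ // usage example above ("target" is a hypothetical target cloud, not part of this header):
+ //   pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
+ //   tree->setInputCloud (target);
+ //   tve.setSearchMethodTarget (tree, true);  // never recompute this tree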
+ /** \brief Set a threshold for which a specific transformation is considered valid.
+ *
+ * \note Since we're using MSE (Mean Squared Error) as a metric, the threshold
+ * represents the mean Euclidean distance threshold over all nearest neighbors
+ * up to max_range.
+ *
+ * \param[in] threshold the threshold for which a transformation is considered valid
+ */
+ inline void
+ setThreshold(double threshold)
+ {
+ threshold_ = threshold;
+ }
- /** \brief Provide a pointer to the search object used to find correspondences in
- * the target cloud.
- * \param[in] tree a pointer to the spatial search object.
- * \param[in] force_no_recompute If set to true, this tree will NEVER be
- * recomputed, regardless of calls to setInputTarget. Only use if you are
- * confident that the tree will be set correctly.
- */
- inline void
- setSearchMethodTarget (const KdTreePtr &tree,
- bool force_no_recompute = false)
- {
- tree_ = tree;
- if (force_no_recompute)
- {
- force_no_recompute_ = true;
- }
- }
+ /** \brief Get the threshold for which a specific transformation is valid. */
+ inline double
+ getThreshold()
+ {
+ return (threshold_);
+ }
- /** \brief Set a threshold for which a specific transformation is considered valid.
- *
- * \note Since we're using MSE (Mean Squared Error) as a metric, the threshold
- * represents the mean Euclidean distance threshold over all nearest neighbors
- * up to max_range.
- *
- * \param[in] threshold the threshold for which a transformation is vali
- */
- inline void
- setThreshold (double threshold)
- {
- threshold_ = threshold;
- }
+ /** \brief Validate the given transformation with respect to the input cloud data, and
+ * return a score.
+ *
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] transformation_matrix the transformation matrix to evaluate
+ *
+ * \return the score or confidence measure for the given
+ * transformation_matrix with respect to the input data
+ */
+ double
+ validateTransformation(const PointCloudSourceConstPtr& cloud_src,
+ const PointCloudTargetConstPtr& cloud_tgt,
+ const Matrix4& transformation_matrix) const;
- /** \brief Get the threshold for which a specific transformation is valid. */
- inline double
- getThreshold ()
- {
- return (threshold_);
- }
+ /** \brief Comparator function for deciding which score is better after running the
+ * validation on multiple transforms.
+ *
+ * \param[in] score1 the first value
+ * \param[in] score2 the second value
+ *
+ * \return true if score1 is better than score2
+ */
+ virtual bool
+ operator()(const double& score1, const double& score2) const
+ {
+ return (score1 < score2);
+ }
- /** \brief Validate the given transformation with respect to the input cloud data, and return a score.
- *
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- *
- * \return the score or confidence measure for the given
- * transformation_matrix with respect to the input data
- */
- double
- validateTransformation (
- const PointCloudSourceConstPtr &cloud_src,
- const PointCloudTargetConstPtr &cloud_tgt,
- const Matrix4 &transformation_matrix) const;
+ /** \brief Check if the score is valid for a specific transformation.
+ *
+ * \param[in] cloud_src the source point cloud dataset
+ * \param[in] cloud_tgt the target point cloud dataset
+ * \param[in] transformation_matrix the transformation matrix to check
+ *
+ * \return true if the transformation is valid, false otherwise.
+ */
+ virtual bool
+ isValid(const PointCloudSourceConstPtr& cloud_src,
+ const PointCloudTargetConstPtr& cloud_tgt,
+ const Matrix4& transformation_matrix) const
+ {
+ if (std::isnan(threshold_)) {
+ PCL_ERROR("[pcl::TransformationValidationEuclidean::isValid] Threshold not set! "
+ "Please use setThreshold () before continuing.\n");
+ return (false);
+ }
- /** \brief Comparator function for deciding which score is better after running the
- * validation on multiple transforms.
- *
- * \param[in] score1 the first value
- * \param[in] score2 the second value
- *
- * \return true if score1 is better than score2
- */
- virtual bool
- operator() (const double &score1, const double &score2) const
- {
- return (score1 < score2);
- }
+ return (validateTransformation(cloud_src, cloud_tgt, transformation_matrix) <
+ threshold_);
+ }
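+ // A minimal sketch of threshold-based validation (hedged example; "source", "target"
+ // and "transform" are hypothetical variables, not part of this header):
+ //   pcl::registration::TransformationValidationEuclidean<pcl::PointXYZ, pcl::PointXYZ> tve;
+ //   tve.setMaxRange (0.05);     // ignore correspondences further apart than this
+ //   tve.setThreshold (0.0025);  // accept transforms whose normalized score is below this
+ //   bool ok = tve.isValid (source, target, transform);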
- /** \brief Check if the score is valid for a specific transformation.
- *
- * \param[in] cloud_src the source point cloud dataset
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the transformation matrix
- *
- * \return true if the transformation is valid, false otherwise.
- */
- virtual bool
- isValid (
- const PointCloudSourceConstPtr &cloud_src,
- const PointCloudTargetConstPtr &cloud_tgt,
- const Matrix4 &transformation_matrix) const
- {
- if (std::isnan (threshold_))
- {
- PCL_ERROR ("[pcl::TransformationValidationEuclidean::isValid] Threshold not set! Please use setThreshold () before continuing.");
- return (false);
- }
+protected:
+ /** \brief The maximum allowable distance between a point and its correspondence in
+ * the target in order for a correspondence to be considered \a valid. Default:
+ * double::max.
+ */
+ double max_range_;
- return (validateTransformation (cloud_src, cloud_tgt, transformation_matrix) < threshold_);
- }
+ /** \brief The threshold for which a specific transformation is valid.
+ * Set to NaN by default, as we must require the user to set it.
+ */
+ double threshold_;
- protected:
- /** \brief The maximum allowable distance between a point and its correspondence in the target
- * in order for a correspondence to be considered \a valid. Default: double::max.
- */
- double max_range_;
+ /** \brief A pointer to the spatial search object. */
+ KdTreePtr tree_;
- /** \brief The threshold for which a specific transformation is valid.
- * Set to NaN by default, as we must require the user to set it.
- */
- double threshold_;
+ /** \brief A flag which, if set, means the tree operating on the target cloud
+ * will never be recomputed. */
+ bool force_no_recompute_;
- /** \brief A pointer to the spatial search object. */
- KdTreePtr tree_;
+ /** \brief Internal point representation uses only 3D coordinates for L2 */
+ class MyPointRepresentation : public pcl::PointRepresentation<PointTarget> {
+ using pcl::PointRepresentation<PointTarget>::nr_dimensions_;
+ using pcl::PointRepresentation<PointTarget>::trivial_;
- /** \brief A flag which, if set, means the tree operating on the target cloud
- * will never be recomputed*/
- bool force_no_recompute_;
+ public:
+ using Ptr = shared_ptr<MyPointRepresentation>;
+ using ConstPtr = shared_ptr<const MyPointRepresentation>;
+ MyPointRepresentation()
+ {
+ nr_dimensions_ = 3;
+ trivial_ = true;
+ }
- /** \brief Internal point representation uses only 3D coordinates for L2 */
- class MyPointRepresentation: public pcl::PointRepresentation<PointTarget>
- {
- using pcl::PointRepresentation<PointTarget>::nr_dimensions_;
- using pcl::PointRepresentation<PointTarget>::trivial_;
- public:
- using Ptr = shared_ptr<MyPointRepresentation>;
- using ConstPtr = shared_ptr<const MyPointRepresentation>;
-
- MyPointRepresentation ()
- {
- nr_dimensions_ = 3;
- trivial_ = true;
- }
-
- /** \brief Empty destructor */
- virtual ~MyPointRepresentation () {}
+ /** \brief Empty destructor */
+ virtual ~MyPointRepresentation() {}
- virtual void
- copyToFloatArray (const PointTarget &p, float * out) const
- {
- out[0] = p.x;
- out[1] = p.y;
- out[2] = p.z;
- }
- };
+ virtual void
+ copyToFloatArray(const PointTarget& p, float* out) const
+ {
+ out[0] = p.x;
+ out[1] = p.y;
+ out[2] = p.z;
+ }
+ };
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
- }
-}
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/impl/transformation_validation_euclidean.hpp>
*/
#pragma once
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include pcl/common/transforms.h directly.")
#include <pcl/common/transforms.h>
#include <pcl/point_cloud.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b NullEstimate struct
- * \author Nicola Fioraio
- * \ingroup registration
- */
- struct NullEstimate
- {};
+namespace pcl {
+namespace registration {
+/** \brief @b NullEstimate struct
+ * \author Nicola Fioraio
+ * \ingroup registration
+ */
+struct NullEstimate {};
- /** \brief @b PoseEstimate struct
- * \author Nicola Fioraio
- * \ingroup registration
- */
- template <typename PointT>
- struct PoseEstimate
- {
- Eigen::Matrix4f pose;
- typename pcl::PointCloud<PointT>::ConstPtr cloud;
+/** \brief @b PoseEstimate struct
+ * \author Nicola Fioraio
+ * \ingroup registration
+ */
+template <typename PointT>
+struct PoseEstimate {
+ Eigen::Matrix4f pose;
+ typename pcl::PointCloud<PointT>::ConstPtr cloud;
- PoseEstimate (const Eigen::Matrix4f& p = Eigen::Matrix4f::Identity(),
- const typename pcl::PointCloud<PointT>::ConstPtr& c = typename pcl::PointCloud<PointT>::ConstPtr())
- : pose (p), cloud (c) {}
- };
- }
-}
+ PoseEstimate(const Eigen::Matrix4f& p = Eigen::Matrix4f::Identity(),
+ const typename pcl::PointCloud<PointT>::ConstPtr& c =
+ typename pcl::PointCloud<PointT>::ConstPtr())
+ : pose(p), cloud(c)
+ {}
+};
+} // namespace registration
+} // namespace pcl
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/registration/eigen.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief Base warp point class.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSourceT, typename PointTargetT, typename Scalar = float>
- class WarpPointRigid
- {
- public:
- using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
- using VectorX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
- using Vector4 = Eigen::Matrix<Scalar, 4, 1>;
+namespace pcl {
+namespace registration {
+/** \brief Base warp point class.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSourceT, typename PointTargetT, typename Scalar = float>
+class WarpPointRigid {
+public:
+ using Matrix4 = Eigen::Matrix<Scalar, 4, 4>;
+ using VectorX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
+ using Vector4 = Eigen::Matrix<Scalar, 4, 1>;
- using Ptr = shared_ptr<WarpPointRigid<PointSourceT, PointTargetT, Scalar> >;
- using ConstPtr = shared_ptr<const WarpPointRigid<PointSourceT, PointTargetT, Scalar> >;
+ using Ptr = shared_ptr<WarpPointRigid<PointSourceT, PointTargetT, Scalar>>;
+ using ConstPtr = shared_ptr<const WarpPointRigid<PointSourceT, PointTargetT, Scalar>>;
- /** \brief Constructor
- * \param[in] nr_dim the number of dimensions
- */
- WarpPointRigid (int nr_dim)
- : nr_dim_ (nr_dim)
- , transform_matrix_ (Matrix4::Zero ())
- {
- transform_matrix_ (3, 3) = 1.0;
- };
+ /** \brief Constructor
+ * \param[in] nr_dim the number of dimensions
+ */
+ WarpPointRigid(int nr_dim) : nr_dim_(nr_dim), transform_matrix_(Matrix4::Zero())
+ {
+ transform_matrix_(3, 3) = 1.0;
+ };
- /** \brief Destructor. */
- virtual ~WarpPointRigid () {};
+ /** \brief Destructor. */
+ virtual ~WarpPointRigid(){};
- /** \brief Set warp parameters. Pure virtual.
- * \param[in] p warp parameters
- */
- virtual void
- setParam (const VectorX& p) = 0;
+ /** \brief Set warp parameters. Pure virtual.
+ * \param[in] p warp parameters
+ */
+ virtual void
+ setParam(const VectorX& p) = 0;
- /** \brief Warp a point given a transformation matrix
- * \param[in] pnt_in the point to warp (transform)
- * \param[out] pnt_out the warped (transformed) point
- */
- inline void
- warpPoint (const PointSourceT& pnt_in, PointSourceT& pnt_out) const
- {
- pnt_out.x = static_cast<float> (transform_matrix_ (0, 0) * pnt_in.x + transform_matrix_ (0, 1) * pnt_in.y + transform_matrix_ (0, 2) * pnt_in.z + transform_matrix_ (0, 3));
- pnt_out.y = static_cast<float> (transform_matrix_ (1, 0) * pnt_in.x + transform_matrix_ (1, 1) * pnt_in.y + transform_matrix_ (1, 2) * pnt_in.z + transform_matrix_ (1, 3));
- pnt_out.z = static_cast<float> (transform_matrix_ (2, 0) * pnt_in.x + transform_matrix_ (2, 1) * pnt_in.y + transform_matrix_ (2, 2) * pnt_in.z + transform_matrix_ (2, 3));
- //pnt_out.getVector3fMap () = transform_matrix_.topLeftCorner (3, 3) *
- // pnt_in.getVector3fMap () +
- // transform_matrix_.block (0, 3, 3, 1);
- //pnt_out.data[3] = pnt_in.data[3];
- }
+ /** \brief Warp a point given a transformation matrix
+ * \param[in] pnt_in the point to warp (transform)
+ * \param[out] pnt_out the warped (transformed) point
+ */
+ inline void
+ warpPoint(const PointSourceT& pnt_in, PointSourceT& pnt_out) const
+ {
+ pnt_out.x = static_cast<float>(
+ transform_matrix_(0, 0) * pnt_in.x + transform_matrix_(0, 1) * pnt_in.y +
+ transform_matrix_(0, 2) * pnt_in.z + transform_matrix_(0, 3));
+ pnt_out.y = static_cast<float>(
+ transform_matrix_(1, 0) * pnt_in.x + transform_matrix_(1, 1) * pnt_in.y +
+ transform_matrix_(1, 2) * pnt_in.z + transform_matrix_(1, 3));
+ pnt_out.z = static_cast<float>(
+ transform_matrix_(2, 0) * pnt_in.x + transform_matrix_(2, 1) * pnt_in.y +
+ transform_matrix_(2, 2) * pnt_in.z + transform_matrix_(2, 3));
+ // pnt_out.getVector3fMap () = transform_matrix_.topLeftCorner (3, 3) *
+ // pnt_in.getVector3fMap () +
+ // transform_matrix_.block (0, 3, 3, 1);
+ // pnt_out.data[3] = pnt_in.data[3];
+ }
- /** \brief Warp a point given a transformation matrix
- * \param[in] pnt_in the point to warp (transform)
- * \param[out] pnt_out the warped (transformed) point
- */
- inline void
- warpPoint (const PointSourceT& pnt_in, Vector4& pnt_out) const
- {
- pnt_out[0] = static_cast<Scalar> (transform_matrix_ (0, 0) * pnt_in.x + transform_matrix_ (0, 1) * pnt_in.y + transform_matrix_ (0, 2) * pnt_in.z + transform_matrix_ (0, 3));
- pnt_out[1] = static_cast<Scalar> (transform_matrix_ (1, 0) * pnt_in.x + transform_matrix_ (1, 1) * pnt_in.y + transform_matrix_ (1, 2) * pnt_in.z + transform_matrix_ (1, 3));
- pnt_out[2] = static_cast<Scalar> (transform_matrix_ (2, 0) * pnt_in.x + transform_matrix_ (2, 1) * pnt_in.y + transform_matrix_ (2, 2) * pnt_in.z + transform_matrix_ (2, 3));
- pnt_out[3] = 0.0;
- }
+ /** \brief Warp a point given a transformation matrix
+ * \param[in] pnt_in the point to warp (transform)
+ * \param[out] pnt_out the warped (transformed) point
+ */
+ inline void
+ warpPoint(const PointSourceT& pnt_in, Vector4& pnt_out) const
+ {
+ pnt_out[0] = static_cast<Scalar>(
+ transform_matrix_(0, 0) * pnt_in.x + transform_matrix_(0, 1) * pnt_in.y +
+ transform_matrix_(0, 2) * pnt_in.z + transform_matrix_(0, 3));
+ pnt_out[1] = static_cast<Scalar>(
+ transform_matrix_(1, 0) * pnt_in.x + transform_matrix_(1, 1) * pnt_in.y +
+ transform_matrix_(1, 2) * pnt_in.z + transform_matrix_(1, 3));
+ pnt_out[2] = static_cast<Scalar>(
+ transform_matrix_(2, 0) * pnt_in.x + transform_matrix_(2, 1) * pnt_in.y +
+ transform_matrix_(2, 2) * pnt_in.z + transform_matrix_(2, 3));
+ pnt_out[3] = 0.0;
+ }
+
+ /** \brief Get the number of dimensions. */
+ inline int
+ getDimension() const
+ {
+ return (nr_dim_);
+ }
- /** \brief Get the number of dimensions. */
- inline int
- getDimension () const { return (nr_dim_); }
+ /** \brief Get the Transform used. */
+ inline const Matrix4&
+ getTransform() const
+ {
+ return (transform_matrix_);
+ }
- /** \brief Get the Transform used. */
- inline const Matrix4&
- getTransform () const { return (transform_matrix_); }
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
- protected:
- int nr_dim_;
- Matrix4 transform_matrix_;
- };
- } // namespace registration
+protected:
+ int nr_dim_;
+ Matrix4 transform_matrix_;
+};
+} // namespace registration
} // namespace pcl
#pragma once
-#include <pcl/registration/eigen.h>
#include <pcl/registration/warp_point_rigid.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b WarpPointRigid3D enables 3D (1D rotation + 2D translation)
- * transformations for points.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSourceT, typename PointTargetT, typename Scalar = float>
- class WarpPointRigid3D : public WarpPointRigid<PointSourceT, PointTargetT, Scalar>
- {
- public:
- using Matrix4 = typename WarpPointRigid<PointSourceT, PointTargetT, Scalar>::Matrix4;
- using VectorX = typename WarpPointRigid<PointSourceT, PointTargetT, Scalar>::VectorX;
+namespace pcl {
+namespace registration {
+/** \brief @b WarpPointRigid3D enables 3D (1D rotation + 2D translation)
+ * transformations for points.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSourceT, typename PointTargetT, typename Scalar = float>
+class WarpPointRigid3D : public WarpPointRigid<PointSourceT, PointTargetT, Scalar> {
+public:
+ using Matrix4 = typename WarpPointRigid<PointSourceT, PointTargetT, Scalar>::Matrix4;
+ using VectorX = typename WarpPointRigid<PointSourceT, PointTargetT, Scalar>::VectorX;
- using Ptr = shared_ptr<WarpPointRigid3D<PointSourceT, PointTargetT, Scalar> >;
- using ConstPtr = shared_ptr<const WarpPointRigid3D<PointSourceT, PointTargetT, Scalar> >;
+ using Ptr = shared_ptr<WarpPointRigid3D<PointSourceT, PointTargetT, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const WarpPointRigid3D<PointSourceT, PointTargetT, Scalar>>;
- /** \brief Constructor. */
- WarpPointRigid3D () : WarpPointRigid<PointSourceT, PointTargetT, Scalar> (3) {}
-
- /** \brief Empty destructor */
- ~WarpPointRigid3D () {}
+ /** \brief Constructor. */
+ WarpPointRigid3D() : WarpPointRigid<PointSourceT, PointTargetT, Scalar>(3) {}
- /** \brief Set warp parameters.
- * \param[in] p warp parameters (tx ty rz)
- */
- void
- setParam (const VectorX & p) override
- {
- assert (p.rows () == this->getDimension ());
- Matrix4 &trans = this->transform_matrix_;
+ /** \brief Empty destructor */
+ ~WarpPointRigid3D() {}
+
+ /** \brief Set warp parameters.
+ * \param[in] p warp parameters (tx ty rz)
+ */
+ void
+ setParam(const VectorX& p) override
+ {
+ assert(p.rows() == this->getDimension());
+ Matrix4& trans = this->transform_matrix_;
- trans = Matrix4::Zero ();
- trans (3, 3) = 1;
- trans (2, 2) = 1; // Rotation around the Z-axis
+ trans = Matrix4::Zero();
+ trans(3, 3) = 1;
+ trans(2, 2) = 1; // Rotation around the Z-axis
- // Copy the rotation and translation components
- trans.block (0, 3, 4, 1) = Eigen::Matrix<Scalar, 4, 1> (p[0], p[1], 0, 1.0);
+ // Copy the translation components (tx, ty)
+ trans.block(0, 3, 4, 1) = Eigen::Matrix<Scalar, 4, 1>(p[0], p[1], 0, 1.0);
- // Compute w from the unit quaternion
- Eigen::Rotation2D<Scalar> r (p[2]);
- trans.topLeftCorner (2, 2) = r.toRotationMatrix ();
- }
- };
+ // Compute the 2D rotation from the angle p[2] (rotation about the Z-axis)
+ Eigen::Rotation2D<Scalar> r(p[2]);
+ trans.topLeftCorner(2, 2) = r.toRotationMatrix();
}
-}
+};
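+// A minimal usage sketch (hedged; variable names and values are hypothetical):
+//   pcl::registration::WarpPointRigid3D<pcl::PointXYZ, pcl::PointXYZ> warp;
+//   Eigen::VectorXf params (3);
+//   params << 0.5f, -0.2f, 0.1f;   // tx, ty, rz (rotation about Z, in radians)
+//   warp.setParam (params);
+//   pcl::PointXYZ in (1.0f, 2.0f, 0.0f), out;
+//   warp.warpPoint (in, out);      // out is the rotated and translated copy of in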
+} // namespace registration
+} // namespace pcl
#include <pcl/registration/warp_point_rigid.h>
-namespace pcl
-{
- namespace registration
- {
- /** \brief @b WarpPointRigid3D enables 6D (3D rotation + 3D translation)
- * transformations for points.
- *
- * \note The class is templated on the source and target point types as well as on the output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Radu B. Rusu
- * \ingroup registration
- */
- template <typename PointSourceT, typename PointTargetT, typename Scalar = float>
- class WarpPointRigid6D : public WarpPointRigid<PointSourceT, PointTargetT, Scalar>
- {
- public:
- using WarpPointRigid<PointSourceT, PointTargetT, Scalar>::transform_matrix_;
+namespace pcl {
+namespace registration {
+/** \brief @b WarpPointRigid6D enables 6D (3D rotation + 3D translation)
+ * transformations for points.
+ *
+ * \note The class is templated on the source and target point types as well as on the
+ * output scalar of the transformation matrix (i.e., float or double). Default: float.
+ * \author Radu B. Rusu
+ * \ingroup registration
+ */
+template <typename PointSourceT, typename PointTargetT, typename Scalar = float>
+class WarpPointRigid6D : public WarpPointRigid<PointSourceT, PointTargetT, Scalar> {
+public:
+ using WarpPointRigid<PointSourceT, PointTargetT, Scalar>::transform_matrix_;
- using Matrix4 = typename WarpPointRigid<PointSourceT, PointTargetT, Scalar>::Matrix4;
- using VectorX = typename WarpPointRigid<PointSourceT, PointTargetT, Scalar>::VectorX;
+ using Matrix4 = typename WarpPointRigid<PointSourceT, PointTargetT, Scalar>::Matrix4;
+ using VectorX = typename WarpPointRigid<PointSourceT, PointTargetT, Scalar>::VectorX;
- using Ptr = shared_ptr<WarpPointRigid6D<PointSourceT, PointTargetT, Scalar> >;
- using ConstPtr = shared_ptr<const WarpPointRigid6D<PointSourceT, PointTargetT, Scalar> >;
+ using Ptr = shared_ptr<WarpPointRigid6D<PointSourceT, PointTargetT, Scalar>>;
+ using ConstPtr =
+ shared_ptr<const WarpPointRigid6D<PointSourceT, PointTargetT, Scalar>>;
- WarpPointRigid6D () : WarpPointRigid<PointSourceT, PointTargetT, Scalar> (6) {}
-
- /** \brief Empty destructor */
- ~WarpPointRigid6D () {}
+ WarpPointRigid6D() : WarpPointRigid<PointSourceT, PointTargetT, Scalar>(6) {}
+
+ /** \brief Empty destructor */
+ ~WarpPointRigid6D() {}
+
+ /** \brief Set warp parameters.
+ * \note Assumes the quaternion parameters are normalized.
+ * \param[in] p warp parameters (tx ty tz qx qy qz)
+ */
+ void
+ setParam(const VectorX& p) override
+ {
+ assert(p.rows() == this->getDimension());
- /** \brief Set warp parameters.
- * \note Assumes the quaternion parameters are normalized.
- * \param[in] p warp parameters (tx ty tz qx qy qz)
- */
- void
- setParam (const VectorX& p) override
- {
- assert (p.rows () == this->getDimension ());
+ // Zero the matrix and copy the translation components (tx, ty, tz)
+ transform_matrix_.setZero();
+ transform_matrix_(0, 3) = p[0];
+ transform_matrix_(1, 3) = p[1];
+ transform_matrix_(2, 3) = p[2];
+ transform_matrix_(3, 3) = 1;
- // Copy the rotation and translation components
- transform_matrix_.setZero ();
- transform_matrix_ (0, 3) = p[0];
- transform_matrix_ (1, 3) = p[1];
- transform_matrix_ (2, 3) = p[2];
- transform_matrix_ (3, 3) = 1;
-
- // Compute w from the unit quaternion
- Eigen::Quaternion<Scalar> q (0, p[3], p[4], p[5]);
- q.w () = static_cast<Scalar> (std::sqrt (1 - q.dot (q)));
- q.normalize ();
- transform_matrix_.topLeftCorner (3, 3) = q.toRotationMatrix ();
- }
- };
+ // Compute w from the unit quaternion
+ Eigen::Quaternion<Scalar> q(0, p[3], p[4], p[5]);
+ q.w() = static_cast<Scalar>(std::sqrt(1 - q.dot(q)));
+ q.normalize();
+ transform_matrix_.topLeftCorner(3, 3) = q.toRotationMatrix();
}
-}
+};
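+// A minimal usage sketch (hedged; values are hypothetical and the (qx, qy, qz) part is
+// taken from a unit quaternion, as setParam assumes):
+//   pcl::registration::WarpPointRigid6D<pcl::PointXYZ, pcl::PointXYZ> warp;
+//   Eigen::VectorXf params (6);
+//   params << 0.1f, 0.0f, 0.0f, 0.0f, 0.0f, 0.2588f;  // tx ty tz qx qy qz (~30 deg about Z)
+//   warp.setParam (params);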
+} // namespace registration
+} // namespace pcl
a given threshold, the registration is said to be complete.
The <b>pcl_registration</b> library implements a plethora of point cloud
-registration algorithms for both organized an unorganized (general purpose)
+registration algorithms for both organized and unorganized (general purpose)
datasets.
\image html http://www.pointclouds.org/assets/images/contents/documentation/registration_outdoor.png
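+As a hedged illustration only (the cloud variables are placeholders), a typical
+registration call built on this library looks like:
+\code
+pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
+icp.setInputSource (source);
+icp.setInputTarget (target);
+pcl::PointCloud<pcl::PointXYZ> aligned;
+icp.align (aligned);
+Eigen::Matrix4f transform = icp.getFinalTransformation ();
+\endcode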
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::registration::CorrespondenceRejectorDistance::getRemainingCorrespondences (
+pcl::registration::CorrespondenceRejectorDistance::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
unsigned int number_valid_correspondences = 0;
- remaining_correspondences.resize (original_correspondences.size ());
- for (const auto &original_correspondence : original_correspondences)
- {
- if (data_container_)
- {
- if (data_container_->getCorrespondenceScore (original_correspondence) < max_distance_)
- {
- remaining_correspondences[number_valid_correspondences] = original_correspondence;
+ remaining_correspondences.resize(original_correspondences.size());
+ for (const auto& original_correspondence : original_correspondences) {
+ if (data_container_) {
+ if (data_container_->getCorrespondenceScore(original_correspondence) <
+ max_distance_) {
+ remaining_correspondences[number_valid_correspondences] =
+ original_correspondence;
++number_valid_correspondences;
}
}
- else
- {
- if (original_correspondence.distance < max_distance_)
- {
- remaining_correspondences[number_valid_correspondences] = original_correspondence;
+ else {
+ if (original_correspondence.distance < max_distance_) {
+ remaining_correspondences[number_valid_correspondences] =
+ original_correspondence;
++number_valid_correspondences;
}
}
}
- remaining_correspondences.resize (number_valid_correspondences);
+ remaining_correspondences.resize(number_valid_correspondences);
}
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::registration::CorrespondenceRejectorFeatures::getRemainingCorrespondences (
+pcl::registration::CorrespondenceRejectorFeatures::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
unsigned int number_valid_correspondences = 0;
- remaining_correspondences.resize (original_correspondences.size ());
+ remaining_correspondences.resize(original_correspondences.size());
// For each set of features, go over each correspondence from input_correspondences_
- for (std::size_t i = 0; i < input_correspondences_->size (); ++i)
- {
+ for (std::size_t i = 0; i < input_correspondences_->size(); ++i) {
// Go over the map of features
- for (FeaturesMap::const_iterator it = features_map_.begin (); it != features_map_.end (); ++it)
- {
+ for (FeaturesMap::const_iterator it = features_map_.begin();
+ it != features_map_.end();
+ ++it) {
// Check if the score in feature space is above the given threshold
- // (assume that the number of feature correspondenecs is the same as the number of point correspondences)
- if (!it->second->isCorrespondenceValid (static_cast<int> (i)))
+ // (assume that the number of feature correspondences is the same as the number of
+ // point correspondences)
+ if (!it->second->isCorrespondenceValid(static_cast<int>(i)))
break;
- remaining_correspondences[number_valid_correspondences] = original_correspondences[i];
+ remaining_correspondences[number_valid_correspondences] =
+ original_correspondences[i];
++number_valid_correspondences;
}
}
- remaining_correspondences.resize (number_valid_correspondences);
+ remaining_correspondences.resize(number_valid_correspondences);
}
//////////////////////////////////////////////////////////////////////////////////////////////
inline bool
-pcl::registration::CorrespondenceRejectorFeatures::hasValidFeatures ()
+pcl::registration::CorrespondenceRejectorFeatures::hasValidFeatures()
{
- if (features_map_.empty ())
+ if (features_map_.empty())
return (false);
- for (const auto &feature : features_map_)
- if (!feature.second->isValid ())
+ for (const auto& feature : features_map_)
+ if (!feature.second->isValid())
return (false);
return (true);
}
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::registration::CorrespondenceRejectorMedianDistance::getRemainingCorrespondences (
+pcl::registration::CorrespondenceRejectorMedianDistance::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
- std::vector <double> dists;
- dists.resize (original_correspondences.size ());
+ std::vector<double> dists;
+ dists.resize(original_correspondences.size());
- for (std::size_t i = 0; i < original_correspondences.size (); ++i)
- {
+ for (std::size_t i = 0; i < original_correspondences.size(); ++i) {
if (data_container_)
- dists[i] = data_container_->getCorrespondenceScore (original_correspondences[i]);
+ dists[i] = data_container_->getCorrespondenceScore(original_correspondences[i]);
else
dists[i] = original_correspondences[i].distance;
}
- std::vector <double> nth (dists);
- nth_element (nth.begin (), nth.begin () + (nth.size () / 2), nth.end ());
- median_distance_ = nth [nth.size () / 2];
+ std::vector<double> nth(dists);
+ nth_element(nth.begin(), nth.begin() + (nth.size() / 2), nth.end());
+ median_distance_ = nth[nth.size() / 2];
unsigned int number_valid_correspondences = 0;
- remaining_correspondences.resize (original_correspondences.size ());
+ remaining_correspondences.resize(original_correspondences.size());
- for (std::size_t i = 0; i < original_correspondences.size (); ++i)
+ for (std::size_t i = 0; i < original_correspondences.size(); ++i)
if (dists[i] <= median_distance_ * factor_)
- remaining_correspondences[number_valid_correspondences++] = original_correspondences[i];
- remaining_correspondences.resize (number_valid_correspondences);
+ remaining_correspondences[number_valid_correspondences++] =
+ original_correspondences[i];
+ remaining_correspondences.resize(number_valid_correspondences);
}
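+// A minimal usage sketch for the median-distance rejector (hedged; the correspondence
+// vectors and the factor value are hypothetical, not library defaults):
+//   pcl::registration::CorrespondenceRejectorMedianDistance rejector;
+//   rejector.setMedianFactor (4.0);  // keep correspondences within 4x the median distance
+//   pcl::Correspondences kept;
+//   rejector.getRemainingCorrespondences (all_correspondences, kept);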
*/
#include <pcl/registration/correspondence_rejection_one_to_one.h>
+#include <pcl/pcl_base.h> // for UNAVAILABLE
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::registration::CorrespondenceRejectorOneToOne::getRemainingCorrespondences (
+pcl::registration::CorrespondenceRejectorOneToOne::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
/* not really an efficient implementation */
pcl::Correspondences input = original_correspondences;
- std::sort (input.begin (), input.end (), pcl::registration::sortCorrespondencesByMatchIndexAndDistance ());
+ std::sort(input.begin(),
+ input.end(),
+ pcl::registration::sortCorrespondencesByMatchIndexAndDistance());
- remaining_correspondences.resize (input.size ());
- int index_last = -1;
+ remaining_correspondences.resize(input.size());
+ pcl::index_t index_last = UNAVAILABLE;
unsigned int number_valid_correspondences = 0;
- for (const auto &i : input)
- {
+ for (const auto& i : input) {
if (i.index_match < 0)
continue;
- if (i.index_match != index_last)
- {
+ if (i.index_match != index_last) {
remaining_correspondences[number_valid_correspondences] = i;
index_last = i.index_match;
++number_valid_correspondences;
}
}
- remaining_correspondences.resize (number_valid_correspondences);
+ remaining_correspondences.resize(number_valid_correspondences);
}
*/
#include <pcl/registration/correspondence_rejection_organized_boundary.h>
-#include <pcl/memory.h> // for static_pointer_cast
-
+#include <pcl/memory.h> // for static_pointer_cast
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::registration::CorrespondenceRejectionOrganizedBoundary::getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
- pcl::Correspondences& remaining_correspondences)
+pcl::registration::CorrespondenceRejectionOrganizedBoundary::
+ getRemainingCorrespondences(const pcl::Correspondences& original_correspondences,
+ pcl::Correspondences& remaining_correspondences)
{
- pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud = static_pointer_cast<pcl::registration::DataContainer<pcl::PointXYZ, pcl::PointNormal> >(data_container_)->getInputTarget ();
+ pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud =
+ static_pointer_cast<
+ pcl::registration::DataContainer<pcl::PointXYZ, pcl::PointNormal>>(
+ data_container_)
+ ->getInputTarget();
- if (!cloud->isOrganized ())
- {
- PCL_ERROR ("[pcl::registration::CorrespondenceRejectionOrganizedBoundary::getRemainingCorrespondences] The target cloud is not organized.\n");
- remaining_correspondences.clear ();
+ if (!cloud->isOrganized()) {
+ PCL_ERROR("[pcl::registration::CorrespondenceRejectionOrganizedBoundary::"
+ "getRemainingCorrespondences] The target cloud is not organized.\n");
+ remaining_correspondences.clear();
return;
}
- remaining_correspondences.reserve (original_correspondences.size ());
- for (const auto &original_correspondence : original_correspondences)
- {
+ remaining_correspondences.reserve(original_correspondences.size());
+ for (const auto& original_correspondence : original_correspondences) {
/// Count how many NaNs bound the target point
int x = original_correspondence.index_match % cloud->width;
int y = original_correspondence.index_match / cloud->width;
int nan_count_tgt = 0;
- for (int x_d = -window_size_/2; x_d <= window_size_/2; ++x_d)
- for (int y_d = -window_size_/2; y_d <= window_size_/2; ++y_d)
- if (x + x_d >= 0 && x + x_d < static_cast<int> (cloud->width) &&
- y + y_d >= 0 && y + y_d < static_cast<int> (cloud->height))
- {
- if (!std::isfinite ((*cloud)(x + x_d, y + y_d).z) ||
- std::fabs ((*cloud)(x, y).z - (*cloud)(x + x_d, y + y_d).z) > depth_step_threshold_)
- nan_count_tgt ++;
+ for (int x_d = -window_size_ / 2; x_d <= window_size_ / 2; ++x_d)
+ for (int y_d = -window_size_ / 2; y_d <= window_size_ / 2; ++y_d)
+ if (x + x_d >= 0 && x + x_d < static_cast<int>(cloud->width) && y + y_d >= 0 &&
+ y + y_d < static_cast<int>(cloud->height)) {
+ if (!std::isfinite((*cloud)(x + x_d, y + y_d).z) ||
+ std::fabs((*cloud)(x, y).z - (*cloud)(x + x_d, y + y_d).z) >
+ depth_step_threshold_)
+ nan_count_tgt++;
}
if (nan_count_tgt >= boundary_nans_threshold_)
continue;
-
- /// The correspondence passes both tests, add it to the filtered set of correspondences
- remaining_correspondences.push_back (original_correspondence);
+ /// The correspondence passes both tests, add it to the filtered set of
+ /// correspondences
+ remaining_correspondences.push_back(original_correspondence);
}
}
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::registration::CorrespondenceRejectorSurfaceNormal::getRemainingCorrespondences (
+pcl::registration::CorrespondenceRejectorSurfaceNormal::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
- if (!data_container_)
- {
- PCL_ERROR ("[pcl::registratin::%s::getRemainingCorrespondences] DataContainer object is not initialized!\n", getClassName ().c_str ());
+ if (!data_container_) {
+ PCL_ERROR("[pcl::registratin::%s::getRemainingCorrespondences] DataContainer "
+ "object is not initialized!\n",
+ getClassName().c_str());
return;
}
unsigned int number_valid_correspondences = 0;
- remaining_correspondences.resize (original_correspondences.size ());
+ remaining_correspondences.resize(original_correspondences.size());
// Test each correspondence
- for (const auto &original_correspondence : original_correspondences)
- {
- if (data_container_->getCorrespondenceScoreFromNormals (original_correspondence) > threshold_)
- remaining_correspondences[number_valid_correspondences++] = original_correspondence;
+ for (const auto& original_correspondence : original_correspondences) {
+ if (data_container_->getCorrespondenceScoreFromNormals(original_correspondence) >
+ threshold_)
+ remaining_correspondences[number_valid_correspondences++] =
+ original_correspondence;
}
- remaining_correspondences.resize (number_valid_correspondences);
+ remaining_correspondences.resize(number_valid_correspondences);
}
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::registration::CorrespondenceRejectorTrimmed::getRemainingCorrespondences (
+pcl::registration::CorrespondenceRejectorTrimmed::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
// not really an efficient implementation
remaining_correspondences = original_correspondences;
- unsigned int number_valid_correspondences = (int (std::floor (overlap_ratio_ * static_cast<float> (remaining_correspondences.size ()))));
- number_valid_correspondences = std::max (number_valid_correspondences, nr_min_correspondences_);
+ unsigned int number_valid_correspondences = (int(std::floor(
+ overlap_ratio_ * static_cast<float>(remaining_correspondences.size()))));
+ number_valid_correspondences =
+ std::max(number_valid_correspondences, nr_min_correspondences_);
- if (number_valid_correspondences < remaining_correspondences.size ())
- {
- std::sort (remaining_correspondences.begin (), remaining_correspondences.end (),
- pcl::registration::sortCorrespondencesByDistance ());
- remaining_correspondences.resize (number_valid_correspondences);
+ if (number_valid_correspondences < remaining_correspondences.size()) {
+ std::sort(remaining_correspondences.begin(),
+ remaining_correspondences.end(),
+ pcl::registration::sortCorrespondencesByDistance());
+ remaining_correspondences.resize(number_valid_correspondences);
}
}
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id$
- *
+ *
*/
#include <pcl/registration/correspondence_rejection_var_trimmed.h>
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::registration::CorrespondenceRejectorVarTrimmed::getRemainingCorrespondences (
+pcl::registration::CorrespondenceRejectorVarTrimmed::getRemainingCorrespondences(
const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences)
{
- std::vector <double> dists;
- dists.resize (original_correspondences.size ());
+ std::vector<double> dists;
+ dists.resize(original_correspondences.size());
- for (std::size_t i = 0; i < original_correspondences.size (); ++i)
- {
- if (data_container_)
- {
- dists[i] = data_container_->getCorrespondenceScore (original_correspondences[i]);
+ for (std::size_t i = 0; i < original_correspondences.size(); ++i) {
+ if (data_container_) {
+ dists[i] = data_container_->getCorrespondenceScore(original_correspondences[i]);
}
- else
- {
+ else {
dists[i] = original_correspondences[i].distance;
}
}
- factor_ = optimizeInlierRatio (dists);
- nth_element (dists.begin (), dists.begin () + int (double (dists.size ()) * factor_), dists.end ());
- trimmed_distance_ = dists [int (double (dists.size ()) * factor_)];
+ factor_ = optimizeInlierRatio(dists);
+ nth_element(
+ dists.begin(), dists.begin() + int(double(dists.size()) * factor_), dists.end());
+ trimmed_distance_ = dists[int(double(dists.size()) * factor_)];
unsigned int number_valid_correspondences = 0;
- remaining_correspondences.resize (original_correspondences.size ());
+ remaining_correspondences.resize(original_correspondences.size());
- for (std::size_t i = 0; i < original_correspondences.size (); ++i)
- {
- if ( dists[i] < trimmed_distance_)
- {
- remaining_correspondences[number_valid_correspondences] = original_correspondences[i];
+ for (std::size_t i = 0; i < original_correspondences.size(); ++i) {
+ if (dists[i] < trimmed_distance_) {
+ remaining_correspondences[number_valid_correspondences] =
+ original_correspondences[i];
++number_valid_correspondences;
}
}
- remaining_correspondences.resize (number_valid_correspondences);
+ remaining_correspondences.resize(number_valid_correspondences);
}
//////////////////////////////////////////////////////////////////////////////////////////////
float
-pcl::registration::CorrespondenceRejectorVarTrimmed::optimizeInlierRatio (std::vector <double>& dists) const
+pcl::registration::CorrespondenceRejectorVarTrimmed::optimizeInlierRatio(
+ std::vector<double>& dists) const
{
- unsigned int points_nbr = static_cast<unsigned int> (dists.size ());
- std::sort (dists.begin (), dists.end ());
+ unsigned int points_nbr = static_cast<unsigned int>(dists.size());
+ std::sort(dists.begin(), dists.end());
- const int min_el = int (std::floor (min_ratio_ * points_nbr));
- const int max_el = int (std::floor (max_ratio_ * points_nbr));
+ const int min_el = int(std::floor(min_ratio_ * points_nbr));
+ const int max_el = int(std::floor(max_ratio_ * points_nbr));
- using LineArray = Eigen::Array <double, Eigen::Dynamic, 1>;
- Eigen::Map<LineArray> sorted_dist (&dists[0], points_nbr);
+ using LineArray = Eigen::Array<double, Eigen::Dynamic, 1>;
+ Eigen::Map<LineArray> sorted_dist(&dists[0], points_nbr);
- const LineArray trunk_sorted_dist = sorted_dist.segment (min_el, max_el-min_el);
- const double lower_sum = sorted_dist.head (min_el).sum ();
- const LineArray ids = LineArray::LinSpaced (trunk_sorted_dist.rows (), min_el+1, max_el);
+ const LineArray trunk_sorted_dist = sorted_dist.segment(min_el, max_el - min_el);
+ const double lower_sum = sorted_dist.head(min_el).sum();
+ const LineArray ids =
+ LineArray::LinSpaced(trunk_sorted_dist.rows(), min_el + 1, max_el);
const LineArray ratio = ids / points_nbr;
- const LineArray deno = ratio.pow (lambda_);
- const LineArray FRMS = deno.inverse ().square () * ids.inverse () * (lower_sum + trunk_sorted_dist);
- int min_index (0);
- FRMS.minCoeff (&min_index);
+ const LineArray deno = ratio.pow(lambda_);
+ const LineArray FRMS =
+ deno.inverse().square() * ids.inverse() * (lower_sum + trunk_sorted_dist);
+ int min_index(0);
+ FRMS.minCoeff(&min_index);
- const float opt_ratio = float (min_index + min_el) / float (points_nbr);
+ const float opt_ratio = float(min_index + min_el) / float(points_nbr);
return (opt_ratio);
}
*/
#include <pcl/registration/correspondence_types.h>
-
*/
#include <pcl/registration/gicp6d.h>
+#include <pcl/memory.h> // for pcl::make_shared
+#include <pcl/point_types_conversion.h> // for PointXYZRGBtoXYZLAB
-#include <pcl/memory.h> // for pcl::make_shared
+namespace pcl {
-namespace pcl
+GeneralizedIterativeClosestPoint6D::GeneralizedIterativeClosestPoint6D(float lab_weight)
+: cloud_lab_(new pcl::PointCloud<PointXYZLAB>)
+, target_lab_(new pcl::PointCloud<PointXYZLAB>)
+, lab_weight_(lab_weight)
{
- // convert sRGB to CIELAB
- Eigen::Vector3f
- RGB2Lab (const Eigen::Vector3i& colorRGB)
- {
- // for sRGB -> CIEXYZ see http://www.easyrgb.com/index.php?X=MATH&H=02#text2
- // for CIEXYZ -> CIELAB see http://www.easyrgb.com/index.php?X=MATH&H=07#text7
-
- double R, G, B, X, Y, Z;
-
- // map sRGB values to [0, 1]
- R = colorRGB[0] / 255.0;
- G = colorRGB[1] / 255.0;
- B = colorRGB[2] / 255.0;
-
- // linearize sRGB values
- if (R > 0.04045)
- R = pow ( (R + 0.055) / 1.055, 2.4);
- else
- R /= 12.92;
-
- if (G > 0.04045)
- G = pow ( (G + 0.055) / 1.055, 2.4);
- else
- G /= 12.92;
-
- if (B > 0.04045)
- B = pow ( (B + 0.055) / 1.055, 2.4);
- else
- B /= 12.92;
-
- // postponed:
- // R *= 100.0;
- // G *= 100.0;
- // B *= 100.0;
-
- // linear sRGB -> CIEXYZ
- X = R * 0.4124 + G * 0.3576 + B * 0.1805;
- Y = R * 0.2126 + G * 0.7152 + B * 0.0722;
- Z = R * 0.0193 + G * 0.1192 + B * 0.9505;
-
- // *= 100.0 including:
- X /= 0.95047; //95.047;
- // Y /= 1;//100.000;
- Z /= 1.08883; //108.883;
-
- // CIEXYZ -> CIELAB
- if (X > 0.008856)
- X = pow (X, 1.0 / 3.0);
- else
- X = 7.787 * X + 16.0 / 116.0;
-
- if (Y > 0.008856)
- Y = pow (Y, 1.0 / 3.0);
- else
- Y = 7.787 * Y + 16.0 / 116.0;
-
- if (Z > 0.008856)
- Z = pow (Z, 1.0 / 3.0);
- else
- Z = 7.787 * Z + 16.0 / 116.0;
-
- Eigen::Vector3f colorLab;
- colorLab[0] = static_cast<float> (116.0 * Y - 16.0);
- colorLab[1] = static_cast<float> (500.0 * (X - Y));
- colorLab[2] = static_cast<float> (200.0 * (Y - Z));
-
- return colorLab;
- }
-
- // convert a PointXYZRGBA cloud to a PointXYZLAB cloud
- void
- convertRGBAToLAB (const PointCloud<pcl::PointXYZRGBA>& in, PointCloud<PointXYZLAB>& out)
- {
- out.resize (in.size ());
-
- for (std::size_t i = 0; i < in.size (); ++i)
- {
- out[i].x = in[i].x;
- out[i].y = in[i].y;
- out[i].z = in[i].z;
- out[i].data[3] = 1.0; // important for homogeneous coordinates
+ // set rescale mask (leave x,y,z unchanged, scale L,a,b by lab_weight)
+ float alpha[6] = {1.0, 1.0, 1.0, lab_weight_, lab_weight_, lab_weight_};
+ point_rep_.setRescaleValues(alpha);
+}
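+// A minimal usage sketch (hedged; the cloud variables are hypothetical and the weight is
+// an illustrative value, not necessarily the library default):
+//   pcl::GeneralizedIterativeClosestPoint6D gicp (0.032f);
+//   gicp.setInputSource (source_rgba);   // pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr
+//   gicp.setInputTarget (target_rgba);
+//   pcl::PointCloud<pcl::PointXYZRGBA> aligned;
+//   gicp.align (aligned);
+//   Eigen::Matrix4f transform = gicp.getFinalTransformation ();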
- Eigen::Vector3f lab = RGB2Lab (in[i].getRGBVector3i ());
- out[i].L = lab[0];
- out[i].a = lab[1];
- out[i].b = lab[2];
- }
- }
+void
+GeneralizedIterativeClosestPoint6D::setInputSource(
+ const PointCloudSourceConstPtr& cloud)
+{
+ // call corresponding base class method
+ GeneralizedIterativeClosestPoint<PointSource, PointTarget>::setInputSource(cloud);
- GeneralizedIterativeClosestPoint6D::GeneralizedIterativeClosestPoint6D (float lab_weight) :
- cloud_lab_ (new pcl::PointCloud<PointXYZLAB>), target_lab_ (new pcl::PointCloud<PointXYZLAB>), lab_weight_ (lab_weight)
- {
- // set rescale mask (leave x,y,z unchanged, scale L,a,b by lab_weight)
- float alpha[6] = { 1.0, 1.0, 1.0, lab_weight_, lab_weight_, lab_weight_ };
- point_rep_.setRescaleValues (alpha);
+ // in addition, convert colors of the cloud to CIELAB
+ cloud_lab_->resize(cloud->size());
+ for (std::size_t point_idx = 0; point_idx < cloud->size(); ++point_idx) {
+ PointXYZRGBtoXYZLAB((*cloud)[point_idx], (*cloud_lab_)[point_idx]);
}
+}
- void
- GeneralizedIterativeClosestPoint6D::setInputSource (const PointCloudSourceConstPtr& cloud)
- {
- // call corresponding base class method
- GeneralizedIterativeClosestPoint<PointSource, PointTarget>::setInputSource (cloud);
+void
+GeneralizedIterativeClosestPoint6D::setInputTarget(
+ const PointCloudTargetConstPtr& target)
+{
+ // call corresponding base class method
+ GeneralizedIterativeClosestPoint<PointSource, PointTarget>::setInputTarget(target);
- // in addition, convert colors of the cloud to CIELAB
- convertRGBAToLAB (*cloud, *cloud_lab_);
+ // in addition, convert colors of the cloud to CIELAB...
+ target_lab_->resize(target->size());
+ for (std::size_t point_idx = 0; point_idx < target->size(); ++point_idx) {
+ PointXYZRGBtoXYZLAB((*target)[point_idx], (*target_lab_)[point_idx]);
}
- void
- GeneralizedIterativeClosestPoint6D::setInputTarget (const PointCloudTargetConstPtr& target)
- {
- // call corresponding base class method
- GeneralizedIterativeClosestPoint<PointSource, PointTarget>::setInputTarget (
- target);
-
- // in addition, convert colors of the cloud to CIELAB...
- convertRGBAToLAB (*target, *target_lab_);
-
- // ...and build 6d-tree
- target_tree_lab_.setInputCloud (target_lab_);
- target_tree_lab_.setPointRepresentation (
- pcl::make_shared<MyPointRepresentation> (point_rep_));
- }
+ // ...and build 6d-tree
+ target_tree_lab_.setInputCloud(target_lab_);
+ target_tree_lab_.setPointRepresentation(
+ pcl::make_shared<MyPointRepresentation>(point_rep_));
+}
- bool
- GeneralizedIterativeClosestPoint6D::searchForNeighbors (const PointXYZLAB& query, std::vector<int>& index,
- std::vector<float>& distance)
- {
- int k = target_tree_lab_.nearestKSearch (query, 1, index, distance);
+bool
+GeneralizedIterativeClosestPoint6D::searchForNeighbors(const PointXYZLAB& query,
+ pcl::Indices& index,
+ std::vector<float>& distance)
+{
+ int k = target_tree_lab_.nearestKSearch(query, 1, index, distance);
- // check if neighbor was found
- return (k != 0);
- }
+ // check if neighbor was found
+ return (k != 0);
+}
// taken from the original GICP class and modified slightly to make use of color values
- void
- GeneralizedIterativeClosestPoint6D::computeTransformation (PointCloudSource& output, const Eigen::Matrix4f& guess)
- {
- using namespace pcl;
-
- IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal ();
-
- // Difference between consecutive transforms
- double delta = 0;
- // Get the size of the target
- const std::size_t N = indices_->size ();
-
- // Set the mahalanobis matrices to identity
- mahalanobis_.resize (N, Eigen::Matrix3d::Identity ());
-
- // Compute target cloud covariance matrices
- if ((!target_covariances_) || (target_covariances_->empty ()))
- {
- target_covariances_.reset (new MatricesVector);
- computeCovariances<PointTarget> (target_, tree_, *target_covariances_);
- }
- // Compute input cloud covariance matrices
- if ((!input_covariances_) || (input_covariances_->empty ()))
- {
- input_covariances_.reset (new MatricesVector);
- computeCovariances<PointSource> (input_, tree_reciprocal_,
- *input_covariances_);
- }
-
- base_transformation_ = guess;
- nr_iterations_ = 0;
- converged_ = false;
- double dist_threshold = corr_dist_threshold_ * corr_dist_threshold_;
- std::vector<int> nn_indices (1);
- std::vector<float> nn_dists (1);
+void
+GeneralizedIterativeClosestPoint6D::computeTransformation(PointCloudSource& output,
+ const Eigen::Matrix4f& guess)
+{
+ using namespace pcl;
- while (!converged_)
- {
- std::size_t cnt = 0;
- std::vector<int> source_indices (indices_->size ());
- std::vector<int> target_indices (indices_->size ());
+ IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal();
- // guess corresponds to base_t and transformation_ to t
- Eigen::Matrix4d transform_R = Eigen::Matrix4d::Zero ();
- for (std::size_t i = 0; i < 4; i++)
- for (std::size_t j = 0; j < 4; j++)
- for (std::size_t k = 0; k < 4; k++)
- transform_R (i, j) += double (transformation_ (i, k))
- * double (guess (k, j));
+ // Difference between consecutive transforms
+ double delta = 0;
+ // Get the size of the target
+ const std::size_t N = indices_->size();
- Eigen::Matrix3d R = transform_R.topLeftCorner<3, 3> ();
+ // Set the mahalanobis matrices to identity
+ mahalanobis_.resize(N, Eigen::Matrix3d::Identity());
- for (std::size_t i = 0; i < N; i++)
- {
- // MODIFICATION: take point from the CIELAB cloud instead
- PointXYZLAB query = (*cloud_lab_)[i];
- query.getVector4fMap () = guess * query.getVector4fMap ();
- query.getVector4fMap () = transformation_ * query.getVector4fMap ();
+ // Compute target cloud covariance matrices
+ if ((!target_covariances_) || (target_covariances_->empty())) {
+ target_covariances_.reset(new MatricesVector);
+ computeCovariances<PointTarget>(target_, tree_, *target_covariances_);
+ }
+ // Compute input cloud covariance matrices
+ if ((!input_covariances_) || (input_covariances_->empty())) {
+ input_covariances_.reset(new MatricesVector);
+ computeCovariances<PointSource>(input_, tree_reciprocal_, *input_covariances_);
+ }
- if (!searchForNeighbors (query, nn_indices, nn_dists))
- {
- PCL_ERROR(
- "[pcl::%s::computeTransformation] Unable to find a nearest neighbor in the target dataset for point %d in the source!\n",
- getClassName ().c_str (), (*indices_)[i]);
- return;
- }
+ base_transformation_ = guess;
+ nr_iterations_ = 0;
+ converged_ = false;
+ double dist_threshold = corr_dist_threshold_ * corr_dist_threshold_;
+ pcl::Indices nn_indices(1);
+ std::vector<float> nn_dists(1);
+
+ while (!converged_) {
+ std::size_t cnt = 0;
+ pcl::Indices source_indices(indices_->size());
+ pcl::Indices target_indices(indices_->size());
+
+ // guess corresponds to base_t and transformation_ to t
+ Eigen::Matrix4d transform_R = Eigen::Matrix4d::Zero();
+ for (std::size_t i = 0; i < 4; i++)
+ for (std::size_t j = 0; j < 4; j++)
+ for (std::size_t k = 0; k < 4; k++)
+ transform_R(i, j) += double(transformation_(i, k)) * double(guess(k, j));
+
+ Eigen::Matrix3d R = transform_R.topLeftCorner<3, 3>();
+
+ for (std::size_t i = 0; i < N; i++) {
+ // MODIFICATION: take point from the CIELAB cloud instead
+ PointXYZLAB query = (*cloud_lab_)[i];
+ query.getVector4fMap() = guess * query.getVector4fMap();
+ query.getVector4fMap() = transformation_ * query.getVector4fMap();
+
+ if (!searchForNeighbors(query, nn_indices, nn_dists)) {
+ PCL_ERROR("[pcl::%s::computeTransformation] Unable to find a nearest neighbor "
+ "in the target dataset for point %d in the source!\n",
+ getClassName().c_str(),
+ (*indices_)[i]);
+ return;
+ }
- // Check if the distance to the nearest neighbor is smaller than the user imposed threshold
- if (nn_dists[0] < dist_threshold)
- {
- Eigen::Matrix3d &C1 = (*input_covariances_)[i];
- Eigen::Matrix3d &C2 = (*target_covariances_)[nn_indices[0]];
- Eigen::Matrix3d &M = mahalanobis_[i];
- // M = R*C1
- M = R * C1;
- // temp = M*R' + C2 = R*C1*R' + C2
- Eigen::Matrix3d temp = M * R.transpose ();
- temp += C2;
- // M = temp^-1
- M = temp.inverse ();
- source_indices[cnt] = static_cast<int> (i);
- target_indices[cnt] = nn_indices[0];
- cnt++;
- }
+ // Check if the distance to the nearest neighbor is smaller than the user imposed
+ // threshold
+ if (nn_dists[0] < dist_threshold) {
+ Eigen::Matrix3d& C1 = (*input_covariances_)[i];
+ Eigen::Matrix3d& C2 = (*target_covariances_)[nn_indices[0]];
+ Eigen::Matrix3d& M = mahalanobis_[i];
+ // M = R*C1
+ M = R * C1;
+ // temp = M*R' + C2 = R*C1*R' + C2
+ Eigen::Matrix3d temp = M * R.transpose();
+ temp += C2;
+ // M = temp^-1
+ M = temp.inverse();
+ source_indices[cnt] = static_cast<int>(i);
+ target_indices[cnt] = nn_indices[0];
+ cnt++;
}
- // Resize to the actual number of valid correspondences
- source_indices.resize (cnt);
- target_indices.resize (cnt);
- /* optimize transformation using the current assignment and Mahalanobis metrics*/
- previous_transformation_ = transformation_;
- //optimization right here
- try
- {
- rigid_transformation_estimation_ (output, source_indices, *target_,
- target_indices, transformation_);
- /* compute the delta from this iteration */
- delta = 0.;
- for (int k = 0; k < 4; k++)
- {
- for (int l = 0; l < 4; l++)
- {
- double ratio = 1;
- if (k < 3 && l < 3) // rotation part of the transform
- ratio = 1. / rotation_epsilon_;
- else
- ratio = 1. / transformation_epsilon_;
- double c_delta = ratio
- * std::abs (
- previous_transformation_ (k, l) - transformation_ (k, l));
- if (c_delta > delta)
- delta = c_delta;
- }
+ }
+ // Resize to the actual number of valid correspondences
+ source_indices.resize(cnt);
+ target_indices.resize(cnt);
+ /* optimize transformation using the current assignment and Mahalanobis metrics*/
+ previous_transformation_ = transformation_;
+ // optimization right here
+ try {
+ rigid_transformation_estimation_(
+ output, source_indices, *target_, target_indices, transformation_);
+ /* compute the delta from this iteration */
+ delta = 0.;
+ for (int k = 0; k < 4; k++) {
+ for (int l = 0; l < 4; l++) {
+ double ratio = 1;
+ if (k < 3 && l < 3) // rotation part of the transform
+ ratio = 1. / rotation_epsilon_;
+ else
+ ratio = 1. / transformation_epsilon_;
+ double c_delta =
+ ratio * std::abs(previous_transformation_(k, l) - transformation_(k, l));
+ if (c_delta > delta)
+ delta = c_delta;
}
}
- catch (PCLException &e)
- {
- PCL_DEBUG("[pcl::%s::computeTransformation] Optimization issue %s\n",
- getClassName ().c_str (), e.what ());
- break;
- }
-
- nr_iterations_++;
- // Check for convergence
- if (nr_iterations_ >= max_iterations_ || delta < 1)
- {
- converged_ = true;
- previous_transformation_ = transformation_;
- PCL_DEBUG(
- "[pcl::%s::computeTransformation] Convergence reached. Number of iterations: %d out of %d. Transformation difference: %f\n",
- getClassName ().c_str (), nr_iterations_, max_iterations_,
- (transformation_ - previous_transformation_).array ().abs ().sum ());
- }
- else
- PCL_DEBUG("[pcl::%s::computeTransformation] Convergence failed\n",
- getClassName ().c_str ());
+ } catch (PCLException& e) {
+ PCL_DEBUG("[pcl::%s::computeTransformation] Optimization issue %s\n",
+ getClassName().c_str(),
+ e.what());
+ break;
}
- //for some reason the static equivalent method raises an error
- // final_transformation_.block<3,3> (0,0) = (transformation_.block<3,3> (0,0)) * (guess.block<3,3> (0,0));
- // final_transformation_.block <3, 1> (0, 3) = transformation_.block <3, 1> (0, 3) + guess.rightCols<1>.block <3, 1> (0, 3);
- final_transformation_.topLeftCorner (3, 3) =
- previous_transformation_.topLeftCorner (3, 3)
- * guess.topLeftCorner (3, 3);
- final_transformation_ (0, 3) = previous_transformation_ (0, 3) + guess (0, 3);
- final_transformation_ (1, 3) = previous_transformation_ (1, 3) + guess (1, 3);
- final_transformation_ (2, 3) = previous_transformation_ (2, 3) + guess (2, 3);
- // Transform the point cloud
- pcl::transformPointCloud (*input_, output, final_transformation_);
+ nr_iterations_++;
+ // Check for convergence
+ if (nr_iterations_ >= max_iterations_ || delta < 1) {
+ converged_ = true;
+ previous_transformation_ = transformation_;
+ PCL_DEBUG("[pcl::%s::computeTransformation] Convergence reached. Number of "
+ "iterations: %d out of %d. Transformation difference: %f\n",
+ getClassName().c_str(),
+ nr_iterations_,
+ max_iterations_,
+ (transformation_ - previous_transformation_).array().abs().sum());
+ }
+ else
+ PCL_DEBUG("[pcl::%s::computeTransformation] Convergence failed\n",
+ getClassName().c_str());
}
-
+ // for some reason the static equivalent method raises an error
+ // final_transformation_.block<3,3> (0,0) = (transformation_.block<3,3> (0,0)) *
+ // (guess.block<3,3> (0,0)); final_transformation_.block <3, 1> (0, 3) =
+ // transformation_.block <3, 1> (0, 3) + guess.rightCols<1>.block <3, 1> (0, 3);
+ final_transformation_.topLeftCorner(3, 3) =
+ previous_transformation_.topLeftCorner(3, 3) * guess.topLeftCorner(3, 3);
+ final_transformation_(0, 3) = previous_transformation_(0, 3) + guess(0, 3);
+ final_transformation_(1, 3) = previous_transformation_(1, 3) + guess(1, 3);
+ final_transformation_(2, 3) = previous_transformation_(2, 3) + guess(2, 3);
+
+ // Transform the point cloud
+ pcl::transformPointCloud(*input_, output, final_transformation_);
}
+
+} // namespace pcl
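The convergence test in the loop above folds both epsilons into one scale-free quantity: every entry of the transform delta is divided by the epsilon for its block (rotation entries by rotation_epsilon_, translation entries by transformation_epsilon_), and the iteration stops once the largest scaled entry falls below 1 or max_iterations_ is reached. A minimal standalone sketch of that criterion, using a hypothetical helper name rather than anything from the PCL API:

#include <Eigen/Core>
#include <algorithm>
#include <cmath>

// True when every rotation entry changed by less than rotation_eps and every
// translation entry by less than translation_eps, i.e. the scaled max delta < 1.
inline bool
transform_converged(const Eigen::Matrix4f& previous, const Eigen::Matrix4f& current,
                    double rotation_eps, double translation_eps)
{
  double delta = 0.0;
  for (int k = 0; k < 4; ++k)
    for (int l = 0; l < 4; ++l) {
      const double ratio = (k < 3 && l < 3) ? 1.0 / rotation_eps : 1.0 / translation_eps;
      delta = std::max(delta, ratio * std::abs(double(previous(k, l)) - double(current(k, l))));
    }
  return delta < 1.0;
}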
/*
* Software License Agreement (BSD License)
- *
+ *
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2009-2012, Willow Garage, Inc.
* Copyright (c) 2012-, Open Perception, Inc.
- *
+ *
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* * Neither the name of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
*
*/
-#include <pcl/point_types.h>
#include <pcl/impl/instantiate.hpp>
#include <pcl/registration/lum.h>
+// Must come after its header
#include <pcl/registration/impl/lum.hpp>
+#include <pcl/point_types.h>
PCL_INSTANTIATE(LUM, PCL_XYZ_POINT_TYPES);
*
*/
+#ifndef PCL_NO_PRECOMPILE
-#include <pcl/point_types.h>
#include <pcl/impl/instantiate.hpp>
-
#include <pcl/registration/ndt.h>
+// Must come after its header
#include <pcl/registration/impl/ndt.hpp>
+#include <pcl/point_types.h>
+
+template class PCL_EXPORTS
+ pcl::NormalDistributionsTransform<pcl::PointXYZ, pcl::PointXYZ>;
+template class PCL_EXPORTS
+ pcl::NormalDistributionsTransform<pcl::PointXYZI, pcl::PointXYZI>;
+template class PCL_EXPORTS
+ pcl::NormalDistributionsTransform<pcl::PointXYZRGB, pcl::PointXYZRGB>;
-template class PCL_EXPORTS pcl::NormalDistributionsTransform<pcl::PointXYZ, pcl::PointXYZ>;
-template class PCL_EXPORTS pcl::NormalDistributionsTransform<pcl::PointXYZI, pcl::PointXYZI>;
-template class PCL_EXPORTS pcl::NormalDistributionsTransform<pcl::PointXYZRGB, pcl::PointXYZRGB>;
+#endif
//#include <pcl/impl/instantiate.hpp>
//#include <pcl/registration/impl/ppf_registration.hpp>
-//PCL_INSTANTIATE_PRODUCT(PPFRegistration, (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES));
-//#endif // PCL_NO_PRECOMPILE
+// PCL_INSTANTIATE_PRODUCT(PPFRegistration,
+// (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES)); #endif // PCL_NO_PRECOMPILE
void
-pcl::PPFHashMapSearch::setInputFeatureCloud (PointCloud<PPFSignature>::ConstPtr feature_cloud)
+pcl::PPFHashMapSearch::setInputFeatureCloud(
+ PointCloud<PPFSignature>::ConstPtr feature_cloud)
{
// Discretize the feature cloud and insert it in the hash map
- feature_hash_map_->clear ();
- unsigned int n = static_cast<unsigned int> (std::sqrt (static_cast<float> (feature_cloud->size ())));
+ feature_hash_map_->clear();
+ unsigned int n =
+ static_cast<unsigned int>(std::sqrt(static_cast<float>(feature_cloud->size())));
int d1, d2, d3, d4;
max_dist_ = -1.0;
- alpha_m_.resize (n);
- for (std::size_t i = 0; i < n; ++i)
- {
- std::vector <float> alpha_m_row (n);
- for (std::size_t j = 0; j < n; ++j)
- {
- d1 = static_cast<int> (std::floor ((*feature_cloud)[i*n+j].f1 / angle_discretization_step_));
- d2 = static_cast<int> (std::floor ((*feature_cloud)[i*n+j].f2 / angle_discretization_step_));
- d3 = static_cast<int> (std::floor ((*feature_cloud)[i*n+j].f3 / angle_discretization_step_));
- d4 = static_cast<int> (std::floor ((*feature_cloud)[i*n+j].f4 / distance_discretization_step_));
- feature_hash_map_->insert (std::pair<HashKeyStruct, std::pair<std::size_t, std::size_t> > (HashKeyStruct (d1, d2, d3, d4), std::pair<std::size_t, std::size_t> (i, j)));
- alpha_m_row [j] = (*feature_cloud)[i*n + j].alpha_m;
+ alpha_m_.resize(n);
+ for (std::size_t i = 0; i < n; ++i) {
+ std::vector<float> alpha_m_row(n);
+ for (std::size_t j = 0; j < n; ++j) {
+ d1 = static_cast<int>(
+ std::floor((*feature_cloud)[i * n + j].f1 / angle_discretization_step_));
+ d2 = static_cast<int>(
+ std::floor((*feature_cloud)[i * n + j].f2 / angle_discretization_step_));
+ d3 = static_cast<int>(
+ std::floor((*feature_cloud)[i * n + j].f3 / angle_discretization_step_));
+ d4 = static_cast<int>(
+ std::floor((*feature_cloud)[i * n + j].f4 / distance_discretization_step_));
+ feature_hash_map_->insert(
+ std::pair<HashKeyStruct, std::pair<std::size_t, std::size_t>>(
+ HashKeyStruct(d1, d2, d3, d4),
+ std::pair<std::size_t, std::size_t>(i, j)));
+ alpha_m_row[j] = (*feature_cloud)[i * n + j].alpha_m;
- if (max_dist_ < (*feature_cloud)[i*n + j].f4)
- max_dist_ = (*feature_cloud)[i*n + j].f4;
+ if (max_dist_ < (*feature_cloud)[i * n + j].f4)
+ max_dist_ = (*feature_cloud)[i * n + j].f4;
}
alpha_m_[i] = alpha_m_row;
}
internals_initialized_ = true;
}
-
//////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::PPFHashMapSearch::nearestNeighborSearch (float &f1, float &f2, float &f3, float &f4,
- std::vector<std::pair<std::size_t, std::size_t> > &indices)
+pcl::PPFHashMapSearch::nearestNeighborSearch(
+ float& f1,
+ float& f2,
+ float& f3,
+ float& f4,
+ std::vector<std::pair<std::size_t, std::size_t>>& indices)
{
- if (!internals_initialized_)
- {
- PCL_ERROR("[pcl::PPFRegistration::nearestNeighborSearch]: input feature cloud has not been set - skipping search!\n");
+ if (!internals_initialized_) {
+ PCL_ERROR("[pcl::PPFRegistration::nearestNeighborSearch]: input feature cloud has "
+ "not been set - skipping search!\n");
return;
}
- int d1 = static_cast<int> (std::floor (f1 / angle_discretization_step_)),
- d2 = static_cast<int> (std::floor (f2 / angle_discretization_step_)),
- d3 = static_cast<int> (std::floor (f3 / angle_discretization_step_)),
- d4 = static_cast<int> (std::floor (f4 / distance_discretization_step_));
+ int d1 = static_cast<int>(std::floor(f1 / angle_discretization_step_)),
+ d2 = static_cast<int>(std::floor(f2 / angle_discretization_step_)),
+ d3 = static_cast<int>(std::floor(f3 / angle_discretization_step_)),
+ d4 = static_cast<int>(std::floor(f4 / distance_discretization_step_));
- indices.clear ();
- HashKeyStruct key = HashKeyStruct (d1, d2, d3, d4);
- auto map_iterator_pair = feature_hash_map_->equal_range (key);
- for (; map_iterator_pair.first != map_iterator_pair.second; ++ map_iterator_pair.first)
+ indices.clear();
+ HashKeyStruct key = HashKeyStruct(d1, d2, d3, d4);
+ auto map_iterator_pair = feature_hash_map_->equal_range(key);
+ for (; map_iterator_pair.first != map_iterator_pair.second; ++map_iterator_pair.first)
indices.emplace_back(map_iterator_pair.first->second.first,
- map_iterator_pair.first->second.second);
+ map_iterator_pair.first->second.second);
}
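Both functions above revolve around the same four-integer bucket: each pair feature (three angles f1..f3 and one distance f4) is discretized by its step size, the resulting key maps to every (i, j) model pair that produced it, and nearestNeighborSearch simply walks equal_range for the query's key. A compact sketch of that bucketing, with std::unordered_multimap standing in for the library's hash-map type purely for illustration:

#include <array>
#include <cmath>
#include <cstddef>
#include <unordered_map>
#include <utility>

// Hypothetical hasher for the 4-integer discretized feature key.
struct PPFKeyHash {
  std::size_t operator()(const std::array<int, 4>& k) const
  {
    std::size_t h = 0;
    for (int v : k)
      h = h * 31u + static_cast<std::size_t>(v + 1024);
    return h;
  }
};

using PPFMultimap = std::unordered_multimap<std::array<int, 4>,
                                            std::pair<std::size_t, std::size_t>,
                                            PPFKeyHash>;

// Discretize one feature exactly as the loops above do.
inline std::array<int, 4>
make_key(float f1, float f2, float f3, float f4, float angle_step, float distance_step)
{
  return {static_cast<int>(std::floor(f1 / angle_step)),
          static_cast<int>(std::floor(f2 / angle_step)),
          static_cast<int>(std::floor(f3 / angle_step)),
          static_cast<int>(std::floor(f4 / distance_step))};
}

// Insertion:  map.emplace(make_key(...), std::make_pair(i, j));
// Lookup:     for (auto [it, end] = map.equal_range(make_key(...)); it != end; ++it) ...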
*
*/
-#include <pcl/point_types.h>
#include <pcl/impl/instantiate.hpp>
#include <pcl/registration/pyramid_feature_matching.h>
+// Must come after its header
#include <pcl/registration/impl/pyramid_feature_matching.hpp>
-
+#include <pcl/point_types.h>
PCL_INSTANTIATE_PRODUCT(PyramidFeatureHistogram, (PCL_FEATURE_POINT_TYPES))
*
*/
-#include <pcl/point_types.h>
-#include <pcl/registration/registration.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/icp_nl.h>
+#include <pcl/registration/registration.h>
+#include <pcl/point_types.h>
/*#include <pcl/registration/correspondence_estimation.h>
#include <pcl/registration/correspondence_rejection.h>
#include <pcl/registration/correspondence_rejection_distance.h>
#include <pcl/registration/correspondence_sorting.h>
*/
-using IterativeClosestPoint = pcl::IterativeClosestPoint<pcl::PointXYZ,pcl::PointXYZ>;
-using IterativeClosestPointNonLinear = pcl::IterativeClosestPointNonLinear<pcl::PointXYZ,pcl::PointXYZ>;
-//PLUGINLIB_DECLARE_CLASS (pcl, IterativeClosestPoint, IterativeClosestPoint, nodelet::Nodelet);
-//PLUGINLIB_DECLARE_CLASS (pcl, IterativeClosestPointNonLinear, IterativeClosestPointNonLinear, nodelet::Nodelet);
-
+using IterativeClosestPoint = pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ>;
+using IterativeClosestPointNonLinear =
+ pcl::IterativeClosestPointNonLinear<pcl::PointXYZ, pcl::PointXYZ>;
+// PLUGINLIB_DECLARE_CLASS (pcl, IterativeClosestPoint, IterativeClosestPoint,
+// nodelet::Nodelet); PLUGINLIB_DECLARE_CLASS (pcl, IterativeClosestPointNonLinear,
+// IterativeClosestPointNonLinear, nodelet::Nodelet);
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2012-, Open Perception, Inc.
- *
+ *
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#if defined __GNUC__
# pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#include <boost/random.hpp>
*/
#pragma once
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
#if defined __GNUC__
# pragma GCC system_header
#endif
#define PCL_SAMPLE_CONSENSUS_IMPL_LMEDS_H_
#include <pcl/sample_consensus/lmeds.h>
+#include <pcl/common/common.h> // for computeMedian
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
double d_best_penalty = std::numeric_limits<double>::max();
Indices selection;
- Eigen::VectorXf model_coefficients;
+ Eigen::VectorXf model_coefficients (sac_model_->getModelSize ());
std::vector<double> distances;
unsigned skipped_count = 0;
if (selection.empty ())
{
+ PCL_ERROR ("[pcl::LeastMedianSquares::computeModel] No samples could be selected!\n");
break;
}
{
//iterations_++;
++skipped_count;
+ PCL_DEBUG ("[pcl::LeastMedianSquares::computeModel] The function computeModelCoefficients failed, so continuing with the next iteration.\n");
continue;
}
const auto nr_valid_dists = std::distance (distances.begin (), new_end);
// d_cur_penalty = median (distances)
- const std::size_t mid = nr_valid_dists / 2;
PCL_DEBUG ("[pcl::LeastMedianSquares::computeModel] There are %lu valid distances remaining after removing NaN values.\n", nr_valid_dists);
if (nr_valid_dists == 0)
{
++skipped_count;
continue;
}
-
- // Do we have a "middle" point or should we "estimate" one ?
- if ((nr_valid_dists % 2) == 0)
- {
- // Looking at two values instead of one probably doesn't matter because they are mostly barely different, but let's do it for accuracy's sake
- std::nth_element (distances.begin (), distances.begin () + (mid - 1), new_end);
- const double tmp = distances[mid-1];
- const double tmp2 = *(std::min_element (distances.begin () + mid, new_end));
- d_cur_penalty = (sqrt (tmp) + sqrt (tmp2)) / 2.0;
- PCL_DEBUG ("[pcl::LeastMedianSquares::computeModel] Computing median with two values (%g and %g) because number of distances is even.\n", tmp, distances[mid]);
- }
- else
- {
- std::nth_element (distances.begin (), distances.begin () + mid, new_end);
- d_cur_penalty = sqrt (distances[mid]);
- PCL_DEBUG ("[pcl::LeastMedianSquares::computeModel] Computing median with one value (%g) because number of distances is odd.\n", distances[mid]);
- }
+ d_cur_penalty = pcl::computeMedian (distances.begin (), new_end, static_cast<double(*)(double)>(std::sqrt));
// Better match ?
if (d_cur_penalty < d_best_penalty)
#define PCL_SAMPLE_CONSENSUS_IMPL_MLESAC_H_
#include <pcl/sample_consensus/mlesac.h>
-#include <pcl/point_types.h>
+#include <cfloat> // for FLT_MAX
+#include <pcl/common/common.h> // for computeMedian
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
double k = 1.0;
Indices selection;
- Eigen::VectorXf model_coefficients;
+ Eigen::VectorXf model_coefficients (sac_model_->getModelSize ());
std::vector<double> distances;
// Compute sigma - remember to set threshold_ correctly !
sigma_ = computeMedianAbsoluteDeviation (sac_model_->getInputCloud (), sac_model_->getIndices (), threshold_);
+ const double dist_scaling_factor = -1.0 / (2.0 * sigma_ * sigma_); // Precompute since this does not change
+ const double normalization_factor = 1.0 / (sqrt (2 * M_PI) * sigma_);
if (debug_verbosity_level > 1)
PCL_DEBUG ("[pcl::MaximumLikelihoodSampleConsensus::computeModel] Estimated sigma value: %f.\n", sigma_);
continue;
}
- // Use Expectiation-Maximization to find out the right value for d_cur_penalty
+ // Use Expectation-Maximization to find out the right value for d_cur_penalty
// ---[ Initial estimate for the gamma mixing parameter = 1/2
double gamma = 0.5;
double p_outlier_prob = 0;
std::vector<double> p_inlier_prob (indices_size);
for (int j = 0; j < iterations_EM_; ++j)
{
+ const double weighted_normalization_factor = gamma * normalization_factor;
// Likelihood of a datum given that it is an inlier
for (std::size_t i = 0; i < indices_size; ++i)
- p_inlier_prob[i] = gamma * std::exp (- (distances[i] * distances[i] ) / 2 * (sigma_ * sigma_) ) /
- (sqrt (2 * M_PI) * sigma_);
+ p_inlier_prob[i] = weighted_normalization_factor * std::exp ( dist_scaling_factor * distances[i] * distances[i] );
// Likelihood of a datum given that it is an outlier
p_outlier_prob = (1 - gamma) / v;
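The rewritten inlier likelihood is simply a Gaussian density in the residual: the removed line effectively multiplied by sigma squared in the exponent instead of dividing by it, and the new version also hoists the 1/(2*sigma^2) exponent scale and the 1/(sqrt(2*pi)*sigma) normalization out of the EM loop. A minimal sketch of that density with the same precomputed factors:

#include <cmath>

// Gaussian likelihood of a residual d under N(0, sigma^2), written with the same
// factors that the loop above precomputes once per model.
inline double
gaussian_likelihood(double d, double sigma)
{
  const double dist_scaling_factor = -1.0 / (2.0 * sigma * sigma);
  const double normalization_factor = 1.0 / (std::sqrt(2.0 * M_PI) * sigma);
  return normalization_factor * std::exp(dist_scaling_factor * d * d);
}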
distances[i] = ptdiff.dot (ptdiff);
}
- std::sort (distances.begin (), distances.end ());
-
- double result;
- std::size_t mid = indices->size () / 2;
- // Do we have a "middle" point or should we "estimate" one ?
- if (indices->size () % 2 == 0)
- result = (sqrt (distances[mid-1]) + sqrt (distances[mid])) / 2;
- else
- result = sqrt (distances[mid]);
+ const double result = pcl::computeMedian (distances.begin (), distances.end (), static_cast<double(*)(double)>(std::sqrt));
return (sigma * result);
}
y[i] = (*cloud)[(*indices)[i]].y;
z[i] = (*cloud)[(*indices)[i]].z;
}
- std::sort (x.begin (), x.end ());
- std::sort (y.begin (), y.end ());
- std::sort (z.begin (), z.end ());
- std::size_t mid = indices->size () / 2;
- if (indices->size () % 2 == 0)
- {
- median[0] = (x[mid-1] + x[mid]) / 2;
- median[1] = (y[mid-1] + y[mid]) / 2;
- median[2] = (z[mid-1] + z[mid]) / 2;
- }
- else
- {
- median[0] = x[mid];
- median[1] = y[mid];
- median[2] = z[mid];
- }
+ median[0] = pcl::computeMedian (x.begin(), x.end());
+ median[1] = pcl::computeMedian (y.begin(), y.end());
+ median[2] = pcl::computeMedian (z.begin(), z.end());
median[3] = 0;
}
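The hand-rolled sort-and-pick-middle blocks replaced above all collapse into pcl::computeMedian, which, as used here, takes an iterator range plus an optional per-element projection (std::sqrt, because the stored values are squared distances). A self-contained equivalent, independent of PCL, assuming the usual average-of-two-middles convention for even counts:

#include <algorithm>
#include <cstddef>
#include <vector>

// Median of proj(x) over [first, last); averages the two middle values when the
// count is even, mirroring the removed hand-written code.
template <typename Iter, typename Proj>
double
median_of(Iter first, Iter last, Proj proj)
{
  std::vector<double> v;
  for (Iter it = first; it != last; ++it)
    v.push_back(proj(*it));
  if (v.empty())
    return 0.0;
  const std::size_t mid = v.size() / 2;
  std::nth_element(v.begin(), v.begin() + mid, v.end());
  if (v.size() % 2 != 0)
    return v[mid];
  const double upper = v[mid];
  const double lower = *std::max_element(v.begin(), v.begin() + mid);
  return (lower + upper) / 2.0;
}

// e.g. median of squared distances reported in linear units:
//   double m = median_of(sq_dists.begin(), sq_dists.end(),
//                        [](double d) { return std::sqrt(d); });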
double k = 1.0;
Indices selection;
- Eigen::VectorXf model_coefficients;
+ Eigen::VectorXf model_coefficients (sac_model_->getModelSize ());
std::vector<double> distances;
int n_inliers_count = 0;
Indices inliers;
Indices selection;
- Eigen::VectorXf model_coefficients;
+ Eigen::VectorXf model_coefficients (sac_model_->getModelSize ());
// We will increase the pool so the indices_ vector can only contain m elements at first
Indices index_pool;
double k = std::numeric_limits<double>::max();
Indices selection;
- Eigen::VectorXf model_coefficients;
+ Eigen::VectorXf model_coefficients (sac_model_->getModelSize ());
const double log_probability = std::log (1.0 - probability_);
const double one_over_indices = 1.0 / static_cast<double> (sac_model_->getIndices ()->size ());
double k = 1.0;
Indices selection;
- Eigen::VectorXf model_coefficients;
+ Eigen::VectorXf model_coefficients (sac_model_->getModelSize ());
std::vector<double> distances;
std::set<index_t> indices_subset;
double k = std::numeric_limits<double>::max();
Indices selection;
- Eigen::VectorXf model_coefficients;
+ Eigen::VectorXf model_coefficients (sac_model_->getModelSize ());
std::set<index_t> indices_subset;
const double log_probability = std::log (1.0 - probability_);
#ifndef PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_CIRCLE_H_
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_CIRCLE_H_
-#include <pcl/sample_consensus/eigen.h>
+#include <unsupported/Eigen/NonLinearOptimization> // for LevenbergMarquardt
#include <pcl/sample_consensus/sac_model_circle.h>
#include <pcl/common/concatenate.h>
// Radius
model_coefficients[2] = static_cast<float> (sqrt ((model_coefficients[0] - p0[0]) * (model_coefficients[0] - p0[0]) +
(model_coefficients[1] - p0[1]) * (model_coefficients[1] - p0[1])));
+ PCL_DEBUG ("[pcl::SampleConsensusModelCircle2D::computeModelCoefficients] Model is (%g,%g,%g).\n",
+ model_coefficients[0], model_coefficients[1], model_coefficients[2]);
return (true);
}
+#define AT(POS) ((*input_)[(*indices_)[(POS)]])
+
+#ifdef __AVX__
+// This function computes the squared distances (i.e. the distances without the square root) of 8 points to the center of the circle
+template <typename PointT> inline __m256 pcl::SampleConsensusModelCircle2D<PointT>::sqr_dist8 (const std::size_t i, const __m256 a_vec, const __m256 b_vec) const
+{
+ const __m256 tmp1 = _mm256_sub_ps (_mm256_set_ps (AT(i ).x, AT(i+1).x, AT(i+2).x, AT(i+3).x, AT(i+4).x, AT(i+5).x, AT(i+6).x, AT(i+7).x), a_vec);
+ const __m256 tmp2 = _mm256_sub_ps (_mm256_set_ps (AT(i ).y, AT(i+1).y, AT(i+2).y, AT(i+3).y, AT(i+4).y, AT(i+5).y, AT(i+6).y, AT(i+7).y), b_vec);
+ return _mm256_add_ps (_mm256_mul_ps (tmp1, tmp1), _mm256_mul_ps (tmp2, tmp2));
+}
+#endif // ifdef __AVX__
+
+#ifdef __SSE__
+// This function computes the squared distances (i.e. the distances without the square root) of 4 points to the center of the circle
+template <typename PointT> inline __m128 pcl::SampleConsensusModelCircle2D<PointT>::sqr_dist4 (const std::size_t i, const __m128 a_vec, const __m128 b_vec) const
+{
+ const __m128 tmp1 = _mm_sub_ps (_mm_set_ps (AT(i ).x, AT(i+1).x, AT(i+2).x, AT(i+3).x), a_vec);
+ const __m128 tmp2 = _mm_sub_ps (_mm_set_ps (AT(i ).y, AT(i+1).y, AT(i+2).y, AT(i+3).y), b_vec);
+ return _mm_add_ps (_mm_mul_ps (tmp1, tmp1), _mm_mul_ps (tmp2, tmp2));
+}
+#endif // ifdef __SSE__
+
+#undef AT
+
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelCircle2D<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
inliers.reserve (indices_->size ());
error_sqr_dists_.reserve (indices_->size ());
+ const float sqr_inner_radius = (model_coefficients[2] <= threshold ? 0.0f : (model_coefficients[2] - threshold) * (model_coefficients[2] - threshold));
+ const float sqr_outer_radius = (model_coefficients[2] + threshold) * (model_coefficients[2] + threshold);
// Iterate through the 3d points and calculate the distances from them to the circle
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- // Calculate the distance from the point to the circle as the difference between
- // dist(point,circle_origin) and circle_radius
- float distance = std::abs (std::sqrt (
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
-
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] )
- ) - model_coefficients[2]);
- if (distance < threshold)
+ // To avoid sqrt computation: consider one larger circle (radius + threshold) and one smaller circle (radius - threshold).
+ // Valid if point is in larger circle, but not in smaller circle.
+ const float sqr_dist = ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] );
+ if ((sqr_dist <= sqr_outer_radius) && (sqr_dist >= sqr_inner_radius))
{
// Returns the indices of the points whose distances are smaller than the threshold
inliers.push_back ((*indices_)[i]);
- error_sqr_dists_.push_back (static_cast<double> (distance));
+ // Only compute exact distance if necessary (if point is inlier)
+ error_sqr_dists_.push_back (static_cast<double> (std::abs (std::sqrt (sqr_dist) - model_coefficients[2])));
}
}
}
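The inner/outer-radius test above trades one sqrt per candidate for two precomputed squared bounds: a point is an inlier exactly when its squared distance to the circle centre lies in [(r - t)^2, (r + t)^2], with the lower bound clamped to zero whenever the threshold exceeds the radius. A standalone sketch of that predicate:

// True iff the 2D point (x, y) lies within threshold t of the circle (a, b, r),
// using only squared distances (same clamping as above when t >= r).
inline bool
within_circle_band(float x, float y, float a, float b, float r, float t)
{
  const float sqr_inner = (r <= t) ? 0.0f : (r - t) * (r - t);
  const float sqr_outer = (r + t) * (r + t);
  const float sqr_dist = (x - a) * (x - a) + (y - b) * (y - b);
  return sqr_dist >= sqr_inner && sqr_dist <= sqr_outer;
}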
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
return (0);
- std::size_t nr_p = 0;
+#if defined (__AVX__) && defined (__AVX2__)
+ return countWithinDistanceAVX (model_coefficients, threshold);
+#elif defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ return countWithinDistanceSSE (model_coefficients, threshold);
+#else
+ return countWithinDistanceStandard (model_coefficients, threshold);
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelCircle2D<PointT>::countWithinDistanceStandard (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const float sqr_inner_radius = (model_coefficients[2] <= threshold ? 0.0f : (model_coefficients[2] - threshold) * (model_coefficients[2] - threshold));
+ const float sqr_outer_radius = (model_coefficients[2] + threshold) * (model_coefficients[2] + threshold);
// Iterate through the 3d points and calculate the distances from them to the circle
- for (std::size_t i = 0; i < indices_->size (); ++i)
+ for (; i < indices_->size (); ++i)
{
- // Calculate the distance from the point to the circle as the difference between
- // dist(point,circle_origin) and circle_radius
- float distance = std::abs (std::sqrt (
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
-
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] )
- ) - model_coefficients[2]);
- if (distance < threshold)
+ // To avoid sqrt computation: consider one larger circle (radius + threshold) and one smaller circle (radius - threshold).
+ // Valid if point is in larger circle, but not in smaller circle.
+ const float sqr_dist = ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] );
+ if ((sqr_dist <= sqr_outer_radius) && (sqr_dist >= sqr_inner_radius))
nr_p++;
}
return (nr_p);
}
+//////////////////////////////////////////////////////////////////////////
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelCircle2D<PointT>::countWithinDistanceSSE (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const __m128 a_vec = _mm_set1_ps (model_coefficients[0]);
+ const __m128 b_vec = _mm_set1_ps (model_coefficients[1]);
+ // To avoid sqrt computation: consider one larger circle (radius + threshold) and one smaller circle (radius - threshold). Valid if point is in larger circle, but not in smaller circle.
+ const __m128 sqr_inner_radius = _mm_set1_ps ((model_coefficients[2] <= threshold ? 0.0 : (model_coefficients[2]-threshold)*(model_coefficients[2]-threshold)));
+ const __m128 sqr_outer_radius = _mm_set1_ps ((model_coefficients[2]+threshold)*(model_coefficients[2]+threshold));
+ __m128i res = _mm_set1_epi32(0); // This corresponds to nr_p: 4 32bit integers that, summed together, hold the number of inliers
+ for (; (i + 4) <= indices_->size (); i += 4)
+ {
+ const __m128 sqr_dist = sqr_dist4 (i, a_vec, b_vec);
+ const __m128 mask = _mm_and_ps (_mm_cmplt_ps (sqr_inner_radius, sqr_dist), _mm_cmplt_ps (sqr_dist, sqr_outer_radius)); // The mask contains 1 bits if the corresponding points are inliers, else 0 bits
+ res = _mm_add_epi32 (res, _mm_and_si128 (_mm_set1_epi32 (1), _mm_castps_si128 (mask))); // The latter part creates a vector with ones (as 32bit integers) where the points are inliers
+ //const int res = _mm_movemask_ps (mask);
+ //if (res & 1) nr_p++;
+ //if (res & 2) nr_p++;
+ //if (res & 4) nr_p++;
+ //if (res & 8) nr_p++;
+ }
+ nr_p += _mm_extract_epi32 (res, 0);
+ nr_p += _mm_extract_epi32 (res, 1);
+ nr_p += _mm_extract_epi32 (res, 2);
+ nr_p += _mm_extract_epi32 (res, 3);
+
+ // Process the remaining points (at most 3)
+ nr_p += countWithinDistanceStandard (model_coefficients, threshold, i);
+ return (nr_p);
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////
+#if defined (__AVX__) && defined (__AVX2__)
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelCircle2D<PointT>::countWithinDistanceAVX (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const __m256 a_vec = _mm256_set1_ps (model_coefficients[0]);
+ const __m256 b_vec = _mm256_set1_ps (model_coefficients[1]);
+ // To avoid sqrt computation: consider one larger circle (radius + threshold) and one smaller circle (radius - threshold). Valid if point is in larger circle, but not in smaller circle.
+ const __m256 sqr_inner_radius = _mm256_set1_ps ((model_coefficients[2] <= threshold ? 0.0 : (model_coefficients[2]-threshold)*(model_coefficients[2]-threshold)));
+ const __m256 sqr_outer_radius = _mm256_set1_ps ((model_coefficients[2]+threshold)*(model_coefficients[2]+threshold));
+ __m256i res = _mm256_set1_epi32(0); // This corresponds to nr_p: 8 32bit integers that, summed together, hold the number of inliers
+ for (; (i + 8) <= indices_->size (); i += 8)
+ {
+ const __m256 sqr_dist = sqr_dist8 (i, a_vec, b_vec);
+ const __m256 mask = _mm256_and_ps (_mm256_cmp_ps (sqr_inner_radius, sqr_dist, _CMP_LT_OQ), _mm256_cmp_ps (sqr_dist, sqr_outer_radius, _CMP_LT_OQ)); // The mask contains 1 bits if the corresponding points are inliers, else 0 bits
+ res = _mm256_add_epi32 (res, _mm256_and_si256 (_mm256_set1_epi32 (1), _mm256_castps_si256 (mask))); // The latter part creates a vector with ones (as 32bit integers) where the points are inliers
+ //const int res = _mm256_movemask_ps (mask);
+ //if (res & 1) nr_p++;
+ //if (res & 2) nr_p++;
+ //if (res & 4) nr_p++;
+ //if (res & 8) nr_p++;
+ //if (res & 16) nr_p++;
+ //if (res & 32) nr_p++;
+ //if (res & 64) nr_p++;
+ //if (res & 128) nr_p++;
+ }
+ nr_p += _mm256_extract_epi32 (res, 0);
+ nr_p += _mm256_extract_epi32 (res, 1);
+ nr_p += _mm256_extract_epi32 (res, 2);
+ nr_p += _mm256_extract_epi32 (res, 3);
+ nr_p += _mm256_extract_epi32 (res, 4);
+ nr_p += _mm256_extract_epi32 (res, 5);
+ nr_p += _mm256_extract_epi32 (res, 6);
+ nr_p += _mm256_extract_epi32 (res, 7);
+
+ // Process the remaining points (at most 7)
+ nr_p += countWithinDistanceStandard (model_coefficients, threshold, i);
+ return (nr_p);
+}
+#endif
+
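Both SIMD variants above count inliers the same way: the comparisons yield an all-ones or all-zeros lane mask, the mask is ANDed with a vector of integer ones, those per-lane counters accumulate across iterations and are summed afterwards, and whatever does not fill a full vector is handed back to countWithinDistanceStandard through the running index i. A scalar sketch of that mask-and-accumulate pattern over plain arrays:

#include <cstddef>

// Emulates the 4-wide SSE loop above: per-lane 0/1 masks are accumulated, the four
// lane counters are summed, and a scalar tail handles the remaining points.
inline std::size_t
count_in_band(const float* sqr_dists, std::size_t n, float sqr_inner, float sqr_outer)
{
  std::size_t lane_count[4] = {0, 0, 0, 0};
  std::size_t i = 0;
  for (; i + 4 <= n; i += 4)
    for (std::size_t lane = 0; lane < 4; ++lane)
      lane_count[lane] +=
          (sqr_inner < sqr_dists[i + lane] && sqr_dists[i + lane] < sqr_outer) ? 1u : 0u;
  std::size_t nr_p = lane_count[0] + lane_count[1] + lane_count[2] + lane_count[3];
  for (; i < n; ++i) // scalar tail, like countWithinDistanceStandard
    nr_p += (sqr_inner < sqr_dists[i] && sqr_dists[i] < sqr_outer) ? 1u : 0u;
  return nr_p;
}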
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelCircle2D<PointT>::optimizeModelCoefficients (
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->size ());
+ projected_points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
else
{
// Allocate enough space and copy the basics
- projected_points.points.resize (inliers.size ());
+ projected_points.resize (inliers.size ());
projected_points.width = inliers.size ();
projected_points.height = 1;
return (false);
}
+ const float sqr_inner_radius = (model_coefficients[2] <= threshold ? 0.0f : (model_coefficients[2] - threshold) * (model_coefficients[2] - threshold));
+ const float sqr_outer_radius = (model_coefficients[2] + threshold) * (model_coefficients[2] + threshold);
for (const auto &index : indices)
- // Calculate the distance from the point to the circle as the difference between
- //dist(point,circle_origin) and circle_radius
- if (std::abs (std::sqrt (
- ( (*input_)[index].x - model_coefficients[0] ) *
- ( (*input_)[index].x - model_coefficients[0] ) +
- ( (*input_)[index].y - model_coefficients[1] ) *
- ( (*input_)[index].y - model_coefficients[1] )
- ) - model_coefficients[2]) > threshold)
+ {
+ // To avoid sqrt computation: consider one larger circle (radius + threshold) and one smaller circle (radius - threshold).
+ // Valid if point is in larger circle, but not in smaller circle.
+ const float sqr_dist = ( (*input_)[index].x - model_coefficients[0] ) *
+ ( (*input_)[index].x - model_coefficients[0] ) +
+ ( (*input_)[index].y - model_coefficients[1] ) *
+ ( (*input_)[index].y - model_coefficients[1] );
+ if ((sqr_dist > sqr_outer_radius) || (sqr_dist < sqr_inner_radius))
return (false);
-
+ }
return (true);
}
return (false);
if (radius_min_ != -std::numeric_limits<double>::max() && model_coefficients[2] < radius_min_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCircle2D::isModelValid] Radius of circle is too small: should be larger than %g, but is %g.\n",
+ radius_min_, model_coefficients[2]);
return (false);
+ }
if (radius_max_ != std::numeric_limits<double>::max() && model_coefficients[2] > radius_max_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCircle2D::isModelValid] Radius of circle is too big: should be smaller than %g, but is %g.\n",
+ radius_max_, model_coefficients[2]);
return (false);
+ }
return (true);
}
#ifndef PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_CIRCLE_3D_HPP_
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_CIRCLE_3D_HPP_
-#include <pcl/sample_consensus/eigen.h>
+#include <cfloat> // for DBL_MAX
+
+#include <unsupported/Eigen/NonLinearOptimization> // for LevenbergMarquardt
#include <pcl/sample_consensus/sac_model_circle3d.h>
#include <pcl/common/concatenate.h>
model_coefficients[4] = static_cast<float> (circle_normal[0]);
model_coefficients[5] = static_cast<float> (circle_normal[1]);
model_coefficients[6] = static_cast<float> (circle_normal[2]);
-
- return (true);
+
+ PCL_DEBUG ("[pcl::SampleConsensusModelCircle3D::computeModelCoefficients] Model is (%g,%g,%g,%g,%g,%g,%g).\n",
+ model_coefficients[0], model_coefficients[1], model_coefficients[2], model_coefficients[3],
+ model_coefficients[4], model_coefficients[5], model_coefficients[6]);
+ return (true);
}
//////////////////////////////////////////////////////////////////////////
inliers.clear ();
inliers.reserve (indices_->size ());
+ const auto squared_threshold = threshold * threshold;
// Iterate through the 3d points and calculate the distances from them to the sphere
for (std::size_t i = 0; i < indices_->size (); ++i)
{
Eigen::Vector3d K = C + r * helper_vectorP_projC.normalized ();
Eigen::Vector3d distanceVector = P - K;
- if (distanceVector.norm () < threshold)
+ if (distanceVector.squaredNorm () < squared_threshold)
{
// Returns the indices of the points whose distances are smaller than the threshold
inliers.push_back ((*indices_)[i]);
return (0);
std::size_t nr_p = 0;
+ const auto squared_threshold = threshold * threshold;
// Iterate through the 3d points and calculate the distances from them to the sphere
for (std::size_t i = 0; i < indices_->size (); ++i)
{
Eigen::Vector3d K = C + r * helper_vectorP_projC.normalized ();
Eigen::Vector3d distanceVector = P - K;
- if (distanceVector.norm () < threshold)
+ if (distanceVector.squaredNorm () < squared_threshold)
nr_p++;
}
return (nr_p);
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->size ());
+ projected_points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
else
{
// Allocate enough space and copy the basics
- projected_points.points.resize (inliers.size ());
+ projected_points.resize (inliers.size ());
projected_points.width = inliers.size ();
projected_points.height = 1;
return (false);
}
+ const auto squared_threshold = threshold * threshold;
for (const auto &index : indices)
{
// Calculate the distance from the point to the sphere as the difference between
Eigen::Vector3d K = C + r * helper_vectorP_projC.normalized ();
Eigen::Vector3d distanceVector = P - K;
- if (distanceVector.norm () > threshold)
+ if (distanceVector.squaredNorm () > squared_threshold)
return (false);
}
return (true);
return (false);
if (radius_min_ != -DBL_MAX && model_coefficients[3] < radius_min_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCircle3D::isModelValid] Radius of circle is too small: should be larger than %g, but is %g.\n",
+ radius_min_, model_coefficients[3]);
return (false);
+ }
if (radius_max_ != DBL_MAX && model_coefficients[3] > radius_max_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCircle3D::isModelValid] Radius of circle is too big: should be smaller than %g, but is %g.\n",
+ radius_max_, model_coefficients[3]);
return (false);
+ }
return (true);
}
#ifndef PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_CONE_H_
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_CONE_H_
-#include <pcl/sample_consensus/eigen.h>
+#include <unsupported/Eigen/NonLinearOptimization> // for LevenbergMarquardt
#include <pcl/sample_consensus/sac_model_cone.h>
+#include <pcl/common/common.h> // for getAngle3D
#include <pcl/common/concatenate.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (model_coefficients[6] != std::numeric_limits<double>::max() && model_coefficients[6] > max_angle_)
return (false);
+ PCL_DEBUG ("[pcl::SampleConsensusModelCone::computeModelCoefficients] Model is (%g,%g,%g,%g,%g,%g,%g).\n",
+ model_coefficients[0], model_coefficients[1], model_coefficients[2], model_coefficients[3],
+ model_coefficients[4], model_coefficients[5], model_coefficients[6]);
return (true);
}
for (std::size_t i = 0; i < indices_->size (); ++i)
{
Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
Eigen::Vector4f pt_proj = apex + k * axis_dir;
- Eigen::Vector4f dir = pt - pt_proj;
- dir.normalize ();
// Calculate the actual radius of the cone at the level of the projected point
Eigen::Vector4f height = apex - pt_proj;
float actual_cone_radius = tanf (opening_angle) * height.norm ();
- height.normalize ();
-
- // Calculate the cones perfect normals
- Eigen::Vector4f cone_normal = sinf (opening_angle) * height + std::cos (opening_angle) * dir;
// Approximate the distance from the point to the cone as the difference between
// dist(point,cone_axis) and actual cone radius
- double d_euclid = std::abs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
+
+ // Calculate the direction of the point from center
+ Eigen::Vector4f dir = pt - pt_proj;
+ dir.normalize ();
+
+ // Calculate the cone's perfect normals
+ height.normalize ();
+ Eigen::Vector4f cone_normal = sinf (opening_angle) * height + std::cos (opening_angle) * dir;
// Calculate the angular distance between the point normal and the (dir=pt_proj->pt) vector
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_normal = std::abs (getAngle3D (n, cone_normal));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- distances[i] = std::abs (normal_distance_weight_ * d_normal + (1.0 - normal_distance_weight_) * d_euclid);
+ distances[i] = std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist);
}
}
for (std::size_t i = 0; i < indices_->size (); ++i)
{
Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
Eigen::Vector4f pt_proj = apex + k * axis_dir;
- // Calculate the direction of the point from center
- Eigen::Vector4f pp_pt_dir = pt - pt_proj;
- pp_pt_dir.normalize ();
-
// Calculate the actual radius of the cone at the level of the projected point
Eigen::Vector4f height = apex - pt_proj;
double actual_cone_radius = tan(opening_angle) * height.norm ();
- height.normalize ();
-
- // Calculate the cones perfect normals
- Eigen::Vector4f cone_normal = sinf (opening_angle) * height + std::cos (opening_angle) * pp_pt_dir;
// Approximate the distance from the point to the cone as the difference between
// dist(point,cone_axis) and actual cone radius
- double d_euclid = std::abs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
+ if (weighted_euclid_dist > threshold) // Early termination: cannot be an inlier
+ continue;
+
+ // Calculate the direction of the point from center
+ Eigen::Vector4f pp_pt_dir = pt - pt_proj;
+ pp_pt_dir.normalize ();
+
+ // Calculate the cone's perfect normals
+ height.normalize ();
+ Eigen::Vector4f cone_normal = sinf (opening_angle) * height + std::cos (opening_angle) * pp_pt_dir;
// Calculate the angular distance between the point normal and the (dir=pt_proj->pt) vector
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_normal = std::abs (getAngle3D (n, cone_normal));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- double distance = std::abs (normal_distance_weight_ * d_normal + (1.0 - normal_distance_weight_) * d_euclid);
+ double distance = std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist);
if (distance < threshold)
{
for (std::size_t i = 0; i < indices_->size (); ++i)
{
Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
Eigen::Vector4f pt_proj = apex + k * axis_dir;
- // Calculate the direction of the point from center
- Eigen::Vector4f pp_pt_dir = pt - pt_proj;
- pp_pt_dir.normalize ();
-
// Calculate the actual radius of the cone at the level of the projected point
Eigen::Vector4f height = apex - pt_proj;
double actual_cone_radius = tan(opening_angle) * height.norm ();
- height.normalize ();
-
- // Calculate the cones perfect normals
- Eigen::Vector4f cone_normal = sinf (opening_angle) * height + std::cos (opening_angle) * pp_pt_dir;
// Approximate the distance from the point to the cone as the difference between
// dist(point,cone_axis) and actual cone radius
- double d_euclid = std::abs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
+ if (weighted_euclid_dist > threshold) // Early termination: cannot be an inlier
+ continue;
+
+ // Calculate the direction of the point from center
+ Eigen::Vector4f pp_pt_dir = pt - pt_proj;
+ pp_pt_dir.normalize ();
+
+ // Calculate the cone's perfect normals
+ height.normalize ();
+ Eigen::Vector4f cone_normal = sinf (opening_angle) * height + std::cos (opening_angle) * pp_pt_dir;
// Calculate the angular distance between the point normal and the (dir=pt_proj->pt) vector
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_normal = std::abs (getAngle3D (n, cone_normal));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- if (std::abs (normal_distance_weight_ * d_normal + (1.0 - normal_distance_weight_) * d_euclid) < threshold)
+ if (std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist) < threshold)
nr_p++;
}
return (nr_p);
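The early termination added above works because the blended score is a convex combination: with w = normal_distance_weight_, the final distance is w * d_normal + (1 - w) * d_euclid, and since d_normal is never negative, the euclidean part alone exceeding the threshold already rules the point out before any normal or cone-surface math is done. A minimal sketch of that check:

#include <cmath>

// Blended point-to-model score used by the normal-aware models, with the same
// early-out: if the weighted euclidean part alone exceeds the threshold, the point
// cannot be an inlier regardless of its normal.
inline bool
weighted_inlier(double d_euclid, double d_normal, double w, double threshold)
{
  const double weighted_euclid_dist = (1.0 - w) * std::abs(d_euclid);
  if (weighted_euclid_dist > threshold)
    return false;
  return std::abs(w * d_normal + weighted_euclid_dist) < threshold;
}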
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->size ());
+ projected_points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
else
{
// Allocate enough space and copy the basics
- projected_points.points.resize (inliers.size ());
+ projected_points.resize (inliers.size ());
projected_points.width = inliers.size ();
projected_points.height = 1;
angle_diff = (std::min) (angle_diff, M_PI - angle_diff);
// Check whether the current cone model satisfies our angle threshold criterion with respect to the given axis
if (angle_diff > eps_angle_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCone::isModelValid] Angle between cone direction and given axis is too large.\n");
return (false);
+ }
}
if (model_coefficients[6] != -std::numeric_limits<double>::max() && model_coefficients[6] < min_angle_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCone::isModelValid] The opening angle is too small: should be larger than %g, but is %g.\n",
+ min_angle_, model_coefficients[6]);
return (false);
+ }
if (model_coefficients[6] != std::numeric_limits<double>::max() && model_coefficients[6] > max_angle_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCone::isModelValid] The opening angle is too big: should be smaller than %g, but is %g.\n",
+ max_angle_, model_coefficients[6]);
return (false);
+ }
return (true);
}
#ifndef PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_CYLINDER_H_
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_CYLINDER_H_
-#include <pcl/sample_consensus/eigen.h>
+#include <unsupported/Eigen/NonLinearOptimization> // for LevenbergMarquardt
#include <pcl/sample_consensus/sac_model_cylinder.h>
+#include <pcl/common/common.h> // for getAngle3D
#include <pcl/common/concatenate.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (model_coefficients[6] > radius_max_ || model_coefficients[6] < radius_min_)
return (false);
+ PCL_DEBUG ("[pcl::SampleConsensusModelCylinder::computeModelCoefficients] Model is (%g,%g,%g,%g,%g,%g,%g).\n",
+ model_coefficients[0], model_coefficients[1], model_coefficients[2], model_coefficients[3],
+ model_coefficients[4], model_coefficients[5], model_coefficients[6]);
return (true);
}
// dist(point,cylinder_axis) and cylinder radius
// @note need to revise this.
Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
- double d_euclid = std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
// Calculate the point's projection on the cylinder axis
float k = (pt.dot (line_dir) - ptdotdir) * dirdotdir;
dir.normalize ();
// Calculate the angular distance between the point normal and the (dir=pt_proj->pt) vector
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_normal = std::abs (getAngle3D (n, dir));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- distances[i] = std::abs (normal_distance_weight_ * d_normal + (1.0 - normal_distance_weight_) * d_euclid);
+ distances[i] = std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist);
}
}
// Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
- double d_euclid = std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
+ if (weighted_euclid_dist > threshold) // Early termination: cannot be an inlier
+ continue;
// Calculate the point's projection on the cylinder axis
float k = (pt.dot (line_dir) - ptdotdir) * dirdotdir;
dir.normalize ();
// Calculate the angular distance between the point normal and the (dir=pt_proj->pt) vector
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_normal = std::abs (getAngle3D (n, dir));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- double distance = std::abs (normal_distance_weight_ * d_normal + (1.0 - normal_distance_weight_) * d_euclid);
+ double distance = std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist);
if (distance < threshold)
{
// Returns the indices of the points whose distances are smaller than the threshold
// Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
- double d_euclid = std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
+ if (weighted_euclid_dist > threshold) // Early termination: cannot be an inlier
+ continue;
// Calculate the point's projection on the cylinder axis
float k = (pt.dot (line_dir) - ptdotdir) * dirdotdir;
dir.normalize ();
// Calculate the angular distance between the point normal and the (dir=pt_proj->pt) vector
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_normal = std::abs (getAngle3D (n, dir));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- if (std::abs (normal_distance_weight_ * d_normal + (1.0 - normal_distance_weight_) * d_euclid) < threshold)
+ if (std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist) < threshold)
nr_p++;
}
return (nr_p);
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->size ());
+ projected_points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
else
{
// Allocate enough space and copy the basics
- projected_points.points.resize (inliers.size ());
+ projected_points.resize (inliers.size ());
projected_points.width = inliers.size ();
projected_points.height = 1;
angle_diff = (std::min) (angle_diff, M_PI - angle_diff);
// Check whether the current cylinder model satisfies our angle threshold criterion with respect to the given axis
if (angle_diff > eps_angle_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCylinder::isModelValid] Angle between cylinder direction and given axis is too large.\n");
return (false);
+ }
}
if (radius_min_ != -std::numeric_limits<double>::max() && model_coefficients[6] < radius_min_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCylinder::isModelValid] Radius is too small: should be larger than %g, but is %g.\n",
+ radius_min_, model_coefficients[6]);
return (false);
+ }
if (radius_max_ != std::numeric_limits<double>::max() && model_coefficients[6] > radius_max_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelCylinder::isModelValid] Radius is too big: should be smaller than %g, but is %g.\n",
+ radius_max_, model_coefficients[6]);
return (false);
+ }
return (true);
}
#include <pcl/sample_consensus/sac_model_line.h>
#include <pcl/common/centroid.h>
#include <pcl/common/concatenate.h>
+#include <pcl/common/eigen.h> // for eigen33
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
model_coefficients[5] = (*input_)[samples[1]].z - model_coefficients[2];
model_coefficients.template tail<3> ().normalize ();
+ PCL_DEBUG ("[pcl::SampleConsensusModelLine::computeModelCoefficients] Model is (%g,%g,%g,%g,%g,%g).\n",
+ model_coefficients[0], model_coefficients[1], model_coefficients[2],
+ model_coefficients[3], model_coefficients[4], model_coefficients[5]);
return (true);
}
// Compute the 3x3 covariance matrix
Eigen::Vector4f centroid;
- compute3DCentroid (*input_, inliers, centroid);
+ if (0 == compute3DCentroid (*input_, inliers, centroid))
+ {
+ PCL_WARN ("[pcl::SampleConsensusModelLine::optimizeModelCoefficients] compute3DCentroid failed (returned 0) because there are no valid inliers.\n");
+ optimized_coefficients = model_coefficients;
+ return;
+ }
Eigen::Matrix3f covariance_matrix;
computeCovarianceMatrix (*input_, inliers, centroid, covariance_matrix);
optimized_coefficients[0] = centroid[0];
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->size ());
+ projected_points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
else
{
// Allocate enough space and copy the basics
- projected_points.points.resize (inliers.size ());
+ projected_points.resize (inliers.size ());
projected_points.width = inliers.size ();
projected_points.height = 1;
coeff.normalize ();
if (std::abs (axis_.dot (coeff)) < cos_angle_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelNormalParallelPlane::isModelValid] Angle between plane normal and given axis is too large.\n");
return (false);
+ }
}
if (eps_dist_ > 0.0)
{
if (std::abs (-model_coefficients[3] - distance_from_origin_) > eps_dist_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelNormalParallelPlane::isModelValid] Distance of plane to origin is wrong: expected %g, but is %g, difference is larger than %g.\n",
+ distance_from_origin_, -model_coefficients[3], eps_dist_);
return (false);
+ }
}
return (true);
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_NORMAL_PLANE_H_
#include <pcl/sample_consensus/sac_model_normal_plane.h>
+#include <pcl/sample_consensus/impl/sac_model_plane.hpp> // for dist4, dist8
#include <pcl/common/common.h> // for getAngle3D
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (!isModelValid (model_coefficients))
return (0);
+#if defined (__AVX__) && defined (__AVX2__)
+ return countWithinDistanceAVX (model_coefficients, threshold);
+#elif defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ return countWithinDistanceSSE (model_coefficients, threshold);
+#else
+ return countWithinDistanceStandard (model_coefficients, threshold);
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT> std::size_t
+pcl::SampleConsensusModelNormalPlane<PointT, PointNT>::countWithinDistanceStandard (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+
// Obtain the plane normal
Eigen::Vector4f coeff = model_coefficients;
coeff[3] = 0.0f;
- std::size_t nr_p = 0;
-
// Iterate through the 3d points and calculate the distances from them to the plane
- for (std::size_t i = 0; i < indices_->size (); ++i)
+ for (; i < indices_->size (); ++i)
{
const PointT &pt = (*input_)[(*indices_)[i]];
const PointNT &nt = (*normals_)[(*indices_)[i]];
// Calculate the distance from the point to the plane normal as the dot product
// D = (P-A).N/|N|
- Eigen::Vector4f p (pt.x, pt.y, pt.z, 0.0f);
- Eigen::Vector4f n (nt.normal_x, nt.normal_y, nt.normal_z, 0.0f);
- double d_euclid = std::abs (coeff.dot (p) + model_coefficients[3]);
+ const Eigen::Vector4f p (pt.x, pt.y, pt.z, 0.0f);
+ const Eigen::Vector4f n (nt.normal_x, nt.normal_y, nt.normal_z, 0.0f);
+ const double d_euclid = std::abs (coeff.dot (p) + model_coefficients[3]);
// Calculate the angular distance between the point normal and the plane normal
double d_normal = std::abs (getAngle3D (n, coeff));
d_normal = (std::min) (d_normal, M_PI - d_normal);
// Weight with the point curvature. On flat surfaces, curvature -> 0, which means the normal will have a higher influence
- double weight = normal_distance_weight_ * (1.0 - nt.curvature);
+ const double weight = normal_distance_weight_ * (1.0 - nt.curvature);
if (std::abs (weight * d_normal + (1.0 - weight) * d_euclid) < threshold)
+ {
nr_p++;
+ }
}
return (nr_p);
}
+//////////////////////////////////////////////////////////////////////////
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+template <typename PointT, typename PointNT> std::size_t
+pcl::SampleConsensusModelNormalPlane<PointT, PointNT>::countWithinDistanceSSE (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const __m128 a_vec = _mm_set1_ps (model_coefficients[0]);
+ const __m128 b_vec = _mm_set1_ps (model_coefficients[1]);
+ const __m128 c_vec = _mm_set1_ps (model_coefficients[2]);
+ const __m128 d_vec = _mm_set1_ps (model_coefficients[3]);
+ const __m128 threshold_vec = _mm_set1_ps (threshold);
+ const __m128 normal_distance_weight_vec = _mm_set1_ps (normal_distance_weight_);
+ const __m128 abs_help = _mm_set1_ps (-0.0F); // -0.0F (negative zero) means that all bits are 0, only the sign bit is 1
+ __m128i res = _mm_set1_epi32(0); // This corresponds to nr_p: 4 32bit integers that, summed together, hold the number of inliers
+ for (; (i + 4) <= indices_->size (); i += 4)
+ {
+ const __m128 d_euclid_vec = pcl::SampleConsensusModelPlane<PointT>::dist4 (i, a_vec, b_vec, c_vec, d_vec, abs_help);
+
+ const __m128 d_normal_vec = getAcuteAngle3DSSE (
+ _mm_set_ps ((*normals_)[(*indices_)[i ]].normal_x,
+ (*normals_)[(*indices_)[i+1]].normal_x,
+ (*normals_)[(*indices_)[i+2]].normal_x,
+ (*normals_)[(*indices_)[i+3]].normal_x),
+ _mm_set_ps ((*normals_)[(*indices_)[i ]].normal_y,
+ (*normals_)[(*indices_)[i+1]].normal_y,
+ (*normals_)[(*indices_)[i+2]].normal_y,
+ (*normals_)[(*indices_)[i+3]].normal_y),
+ _mm_set_ps ((*normals_)[(*indices_)[i ]].normal_z,
+ (*normals_)[(*indices_)[i+1]].normal_z,
+ (*normals_)[(*indices_)[i+2]].normal_z,
+ (*normals_)[(*indices_)[i+3]].normal_z),
+ a_vec, b_vec, c_vec);
+ const __m128 weight_vec = _mm_mul_ps (normal_distance_weight_vec, _mm_sub_ps (_mm_set1_ps (1.0f),
+ _mm_set_ps ((*normals_)[(*indices_)[i ]].curvature,
+ (*normals_)[(*indices_)[i+1]].curvature,
+ (*normals_)[(*indices_)[i+2]].curvature,
+ (*normals_)[(*indices_)[i+3]].curvature)));
+ const __m128 dist = _mm_andnot_ps (abs_help, _mm_add_ps (_mm_mul_ps (weight_vec, d_normal_vec), _mm_mul_ps (_mm_sub_ps (_mm_set1_ps (1.0f), weight_vec), d_euclid_vec)));
+ const __m128 mask = _mm_cmplt_ps (dist, threshold_vec); // The mask contains 1 bits if the corresponding points are inliers, else 0 bits
+ res = _mm_add_epi32 (res, _mm_and_si128 (_mm_set1_epi32 (1), _mm_castps_si128 (mask))); // The latter part creates a vector with ones (as 32bit integers) where the points are inliers
+ }
+ nr_p += _mm_extract_epi32 (res, 0);
+ nr_p += _mm_extract_epi32 (res, 1);
+ nr_p += _mm_extract_epi32 (res, 2);
+ nr_p += _mm_extract_epi32 (res, 3);
+
+ // Process the remaining points (at most 3)
+ nr_p += countWithinDistanceStandard(model_coefficients, threshold, i);
+ return (nr_p);
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////
+#if defined (__AVX__) && defined (__AVX2__)
+template <typename PointT, typename PointNT> std::size_t
+pcl::SampleConsensusModelNormalPlane<PointT, PointNT>::countWithinDistanceAVX (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const __m256 a_vec = _mm256_set1_ps (model_coefficients[0]);
+ const __m256 b_vec = _mm256_set1_ps (model_coefficients[1]);
+ const __m256 c_vec = _mm256_set1_ps (model_coefficients[2]);
+ const __m256 d_vec = _mm256_set1_ps (model_coefficients[3]);
+ const __m256 threshold_vec = _mm256_set1_ps (threshold);
+ const __m256 normal_distance_weight_vec = _mm256_set1_ps (normal_distance_weight_);
+ const __m256 abs_help = _mm256_set1_ps (-0.0F); // -0.0F (negative zero) means that all bits are 0, only the sign bit is 1
+ __m256i res = _mm256_set1_epi32(0); // This corresponds to nr_p: 8 32bit integers that, summed together, hold the number of inliers
+ for (; (i + 8) <= indices_->size (); i += 8)
+ {
+ const __m256 d_euclid_vec = pcl::SampleConsensusModelPlane<PointT>::dist8 (i, a_vec, b_vec, c_vec, d_vec, abs_help);
+
+ const __m256 d_normal_vec = getAcuteAngle3DAVX (
+ _mm256_set_ps ((*normals_)[(*indices_)[i ]].normal_x,
+ (*normals_)[(*indices_)[i+1]].normal_x,
+ (*normals_)[(*indices_)[i+2]].normal_x,
+ (*normals_)[(*indices_)[i+3]].normal_x,
+ (*normals_)[(*indices_)[i+4]].normal_x,
+ (*normals_)[(*indices_)[i+5]].normal_x,
+ (*normals_)[(*indices_)[i+6]].normal_x,
+ (*normals_)[(*indices_)[i+7]].normal_x),
+ _mm256_set_ps ((*normals_)[(*indices_)[i ]].normal_y,
+ (*normals_)[(*indices_)[i+1]].normal_y,
+ (*normals_)[(*indices_)[i+2]].normal_y,
+ (*normals_)[(*indices_)[i+3]].normal_y,
+ (*normals_)[(*indices_)[i+4]].normal_y,
+ (*normals_)[(*indices_)[i+5]].normal_y,
+ (*normals_)[(*indices_)[i+6]].normal_y,
+ (*normals_)[(*indices_)[i+7]].normal_y),
+ _mm256_set_ps ((*normals_)[(*indices_)[i ]].normal_z,
+ (*normals_)[(*indices_)[i+1]].normal_z,
+ (*normals_)[(*indices_)[i+2]].normal_z,
+ (*normals_)[(*indices_)[i+3]].normal_z,
+ (*normals_)[(*indices_)[i+4]].normal_z,
+ (*normals_)[(*indices_)[i+5]].normal_z,
+ (*normals_)[(*indices_)[i+6]].normal_z,
+ (*normals_)[(*indices_)[i+7]].normal_z),
+ a_vec, b_vec, c_vec);
+ const __m256 weight_vec = _mm256_mul_ps (normal_distance_weight_vec, _mm256_sub_ps (_mm256_set1_ps (1.0f),
+ _mm256_set_ps ((*normals_)[(*indices_)[i ]].curvature,
+ (*normals_)[(*indices_)[i+1]].curvature,
+ (*normals_)[(*indices_)[i+2]].curvature,
+ (*normals_)[(*indices_)[i+3]].curvature,
+ (*normals_)[(*indices_)[i+4]].curvature,
+ (*normals_)[(*indices_)[i+5]].curvature,
+ (*normals_)[(*indices_)[i+6]].curvature,
+ (*normals_)[(*indices_)[i+7]].curvature)));
+ const __m256 dist = _mm256_andnot_ps (abs_help, _mm256_add_ps (_mm256_mul_ps (weight_vec, d_normal_vec), _mm256_mul_ps (_mm256_sub_ps (_mm256_set1_ps (1.0f), weight_vec), d_euclid_vec)));
+ const __m256 mask = _mm256_cmp_ps (dist, threshold_vec, _CMP_LT_OQ); // The mask contains 1 bits if the corresponding points are inliers, else 0 bits
+ res = _mm256_add_epi32 (res, _mm256_and_si256 (_mm256_set1_epi32 (1), _mm256_castps_si256 (mask))); // The latter part creates a vector with ones (as 32bit integers) where the points are inliers
+ }
+ nr_p += _mm256_extract_epi32 (res, 0);
+ nr_p += _mm256_extract_epi32 (res, 1);
+ nr_p += _mm256_extract_epi32 (res, 2);
+ nr_p += _mm256_extract_epi32 (res, 3);
+ nr_p += _mm256_extract_epi32 (res, 4);
+ nr_p += _mm256_extract_epi32 (res, 5);
+ nr_p += _mm256_extract_epi32 (res, 6);
+ nr_p += _mm256_extract_epi32 (res, 7);
+
+ // Process the remaining points (at most 7)
+ nr_p += countWithinDistanceStandard(model_coefficients, threshold, i);
+ return (nr_p);
+}
+#endif
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelNormalPlane<PointT, PointNT>::getDistancesToModel (
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_NORMAL_SPHERE_H_
#include <pcl/sample_consensus/sac_model_normal_sphere.h>
+#include <pcl/common/common.h> // for getAngle3D
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
(*input_)[(*indices_)[i]].z,
0.0f);
+ Eigen::Vector4f n_dir = p - center;
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (n_dir.norm () - model_coefficients[3]);
+ if (weighted_euclid_dist > threshold) // Early termination: cannot be an inlier
+ continue;
+
+ // Calculate the angular distance between the point normal and the sphere normal
Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0],
(*normals_)[(*indices_)[i]].normal[1],
(*normals_)[(*indices_)[i]].normal[2],
0.0f);
-
- Eigen::Vector4f n_dir = p - center;
- double d_euclid = std::abs (n_dir.norm () - model_coefficients[3]);
-
- // Calculate the angular distance between the point normal and the sphere normal
double d_normal = std::abs (getAngle3D (n, n_dir));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- double distance = std::abs (normal_distance_weight_ * d_normal + (1.0 - normal_distance_weight_) * d_euclid);
+ double distance = std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist);
if (distance < threshold)
{
// Returns the indices of the points whose distances are smaller than the threshold
(*input_)[(*indices_)[i]].z,
0.0f);
+ Eigen::Vector4f n_dir = (p-center);
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (n_dir.norm () - model_coefficients[3]);
+ if (weighted_euclid_dist > threshold) // Early termination: cannot be an inlier
+ continue;
+
+ // Calculate the angular distance between the point normal and the sphere normal
Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0],
(*normals_)[(*indices_)[i]].normal[1],
(*normals_)[(*indices_)[i]].normal[2],
0.0f);
-
- Eigen::Vector4f n_dir = (p-center);
- double d_euclid = std::abs (n_dir.norm () - model_coefficients[3]);
- //
- // Calculate the angular distance between the point normal and the sphere normal
double d_normal = std::abs (getAngle3D (n, n_dir));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- if (std::abs (normal_distance_weight_ * d_normal + (1.0 - normal_distance_weight_) * d_euclid) < threshold)
+ if (std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist) < threshold)
nr_p++;
}
return (nr_p);
(*input_)[(*indices_)[i]].z,
0.0f);
+ Eigen::Vector4f n_dir = (p-center);
+ const double weighted_euclid_dist = (1.0 - normal_distance_weight_) * std::abs (n_dir.norm () - model_coefficients[3]);
+
+ // Calculate the angular distance between the point normal and the sphere normal
Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0],
(*normals_)[(*indices_)[i]].normal[1],
(*normals_)[(*indices_)[i]].normal[2],
0.0f);
-
- Eigen::Vector4f n_dir = (p-center);
- double d_euclid = std::abs (n_dir.norm () - model_coefficients[3]);
- //
- // Calculate the angular distance between the point normal and the sphere normal
double d_normal = std::abs (getAngle3D (n, n_dir));
d_normal = (std::min) (d_normal, M_PI - d_normal);
- distances[i] = std::abs (normal_distance_weight_ * d_normal + (1 - normal_distance_weight_) * d_euclid);
+ distances[i] = std::abs (normal_distance_weight_ * d_normal + weighted_euclid_dist);
}
}
// Check whether the current line model satisfies our angle threshold criterion with respect to the given axis
if (angle_diff > eps_angle_)
{
+ PCL_DEBUG ("[pcl::SampleConsensusModelParallelLine::isModelValid] Angle between line direction and given axis is too large.\n");
return (false);
}
}
Eigen::Vector4f axis (axis_[0], axis_[1], axis_[2], 0.0f);
if (std::abs (axis.dot (coeff)) > sin_angle_)
{
+ PCL_DEBUG ("[pcl::SampleConsensusModelParallelPlane::isModelValid] Angle between plane normal and given axis is too large.\n");
return (false);
}
}
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_PERPENDICULAR_PLANE_H_
#include <pcl/sample_consensus/sac_model_perpendicular_plane.h>
+#include <pcl/common/common.h> // for getAngle3D
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
// Check whether the current plane model satisfies our angle threshold criterion with respect to the given axis
if (angle_diff > eps_angle_)
{
+ PCL_DEBUG ("[pcl::SampleConsensusModelPerpendicularPlane::isModelValid] Angle between plane normal and given axis should be smaller than %g, but is %g.\n",
+ eps_angle_, angle_diff);
return (false);
}
}
// ... + d = 0
model_coefficients[3] = -1.0f * (model_coefficients.template head<4>().dot (p0.matrix ()));
+ PCL_DEBUG ("[pcl::SampleConsensusModelPlane::computeModelCoefficients] Model is (%g,%g,%g,%g).\n",
+ model_coefficients[0], model_coefficients[1], model_coefficients[2], model_coefficients[3]);
return (true);
}
+#define AT(POS) ((*input_)[(*indices_)[(POS)]])
+
+#ifdef __AVX__
+// This function computes the distances of 8 points to the plane
+template <typename PointT> inline __m256 pcl::SampleConsensusModelPlane<PointT>::dist8 (const std::size_t i, const __m256 &a_vec, const __m256 &b_vec, const __m256 &c_vec, const __m256 &d_vec, const __m256 &abs_help) const
+{
+ // The andnot-function realizes an abs-operation: the sign bit is removed
+ return _mm256_andnot_ps (abs_help,
+ _mm256_add_ps (_mm256_add_ps (_mm256_mul_ps (a_vec, _mm256_set_ps (AT(i ).x, AT(i+1).x, AT(i+2).x, AT(i+3).x, AT(i+4).x, AT(i+5).x, AT(i+6).x, AT(i+7).x)),
+ _mm256_mul_ps (b_vec, _mm256_set_ps (AT(i ).y, AT(i+1).y, AT(i+2).y, AT(i+3).y, AT(i+4).y, AT(i+5).y, AT(i+6).y, AT(i+7).y))),
+ _mm256_add_ps (_mm256_mul_ps (c_vec, _mm256_set_ps (AT(i ).z, AT(i+1).z, AT(i+2).z, AT(i+3).z, AT(i+4).z, AT(i+5).z, AT(i+6).z, AT(i+7).z)),
+ d_vec))); // TODO this could be replaced by three fmadd-instructions (if available), but the speed gain would probably be minimal
+}
+#endif // ifdef __AVX__
+
+#ifdef __SSE__
+// This function computes the distances of 4 points to the plane
+template <typename PointT> inline __m128 pcl::SampleConsensusModelPlane<PointT>::dist4 (const std::size_t i, const __m128 &a_vec, const __m128 &b_vec, const __m128 &c_vec, const __m128 &d_vec, const __m128 &abs_help) const
+{
+ // The andnot-function realizes an abs-operation: the sign bit is removed
+ return _mm_andnot_ps (abs_help,
+ _mm_add_ps (_mm_add_ps (_mm_mul_ps (a_vec, _mm_set_ps (AT(i ).x, AT(i+1).x, AT(i+2).x, AT(i+3).x)),
+ _mm_mul_ps (b_vec, _mm_set_ps (AT(i ).y, AT(i+1).y, AT(i+2).y, AT(i+3).y))),
+ _mm_add_ps (_mm_mul_ps (c_vec, _mm_set_ps (AT(i ).z, AT(i+1).z, AT(i+2).z, AT(i+3).z)),
+ d_vec))); // TODO this could be replaced by three fmadd-instructions (if available), but the speed gain would probably be minimal
+}
+#endif // ifdef __SSE__
+
+#undef AT
+
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelPlane<PointT>::getDistancesToModel (
PCL_ERROR ("[pcl::SampleConsensusModelPlane::countWithinDistance] Given model is invalid!\n");
return (0);
}
+#if defined (__AVX__) && defined (__AVX2__)
+ return countWithinDistanceAVX (model_coefficients, threshold);
+#elif defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ return countWithinDistanceSSE (model_coefficients, threshold);
+#else
+ return countWithinDistanceStandard (model_coefficients, threshold);
+#endif
+}
+//////////////////////////////////////////////////////////////////////////
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelPlane<PointT>::countWithinDistanceStandard (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
std::size_t nr_p = 0;
-
// Iterate through the 3d points and calculate the distances from them to the plane
- for (std::size_t i = 0; i < indices_->size (); ++i)
+ for (; i < indices_->size (); ++i)
{
// Calculate the distance from the point to the plane normal as the dot product
// D = (P-A).N/|N|
return (nr_p);
}
+//////////////////////////////////////////////////////////////////////////
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelPlane<PointT>::countWithinDistanceSSE (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const __m128 a_vec = _mm_set1_ps (model_coefficients[0]);
+ const __m128 b_vec = _mm_set1_ps (model_coefficients[1]);
+ const __m128 c_vec = _mm_set1_ps (model_coefficients[2]);
+ const __m128 d_vec = _mm_set1_ps (model_coefficients[3]);
+ const __m128 threshold_vec = _mm_set1_ps (threshold);
+ const __m128 abs_help = _mm_set1_ps (-0.0F); // -0.0F (negative zero) means that all bits are 0, only the sign bit is 1
+ __m128i res = _mm_set1_epi32(0); // This corresponds to nr_p: 4 32bit integers that, summed together, hold the number of inliers
+ for (; (i + 4) <= indices_->size (); i += 4)
+ {
+ const __m128 mask = _mm_cmplt_ps (dist4 (i, a_vec, b_vec, c_vec, d_vec, abs_help), threshold_vec); // The mask contains 1 bits if the corresponding points are inliers, else 0 bits
+ res = _mm_add_epi32 (res, _mm_and_si128 (_mm_set1_epi32 (1), _mm_castps_si128 (mask))); // The latter part creates a vector with ones (as 32bit integers) where the points are inliers
+ //const int res = _mm_movemask_ps (mask);
+ //if (res & 1) nr_p++;
+ //if (res & 2) nr_p++;
+ //if (res & 4) nr_p++;
+ //if (res & 8) nr_p++;
+ }
+ nr_p += _mm_extract_epi32 (res, 0);
+ nr_p += _mm_extract_epi32 (res, 1);
+ nr_p += _mm_extract_epi32 (res, 2);
+ nr_p += _mm_extract_epi32 (res, 3);
+
+ // Process the remaining points (at most 3)
+ nr_p += countWithinDistanceStandard(model_coefficients, threshold, i);
+ return (nr_p);
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////
+#if defined (__AVX__) && defined (__AVX2__)
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelPlane<PointT>::countWithinDistanceAVX (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const __m256 a_vec = _mm256_set1_ps (model_coefficients[0]);
+ const __m256 b_vec = _mm256_set1_ps (model_coefficients[1]);
+ const __m256 c_vec = _mm256_set1_ps (model_coefficients[2]);
+ const __m256 d_vec = _mm256_set1_ps (model_coefficients[3]);
+ const __m256 threshold_vec = _mm256_set1_ps (threshold);
+ const __m256 abs_help = _mm256_set1_ps (-0.0F); // -0.0F (negative zero) means that all bits are 0, only the sign bit is 1
+ __m256i res = _mm256_set1_epi32(0); // This corresponds to nr_p: 8 32bit integers that, summed together, hold the number of inliers
+ for (; (i + 8) <= indices_->size (); i += 8)
+ {
+ const __m256 mask = _mm256_cmp_ps (dist8 (i, a_vec, b_vec, c_vec, d_vec, abs_help), threshold_vec, _CMP_LT_OQ); // The mask contains 1 bits if the corresponding points are inliers, else 0 bits
+ res = _mm256_add_epi32 (res, _mm256_and_si256 (_mm256_set1_epi32 (1), _mm256_castps_si256 (mask))); // The latter part creates a vector with ones (as 32bit integers) where the points are inliers
+ //const int res = _mm256_movemask_ps (mask);
+ //if (res & 1) nr_p++;
+ //if (res & 2) nr_p++;
+ //if (res & 4) nr_p++;
+ //if (res & 8) nr_p++;
+ //if (res & 16) nr_p++;
+ //if (res & 32) nr_p++;
+ //if (res & 64) nr_p++;
+ //if (res & 128) nr_p++;
+ }
+ nr_p += _mm256_extract_epi32 (res, 0);
+ nr_p += _mm256_extract_epi32 (res, 1);
+ nr_p += _mm256_extract_epi32 (res, 2);
+ nr_p += _mm256_extract_epi32 (res, 3);
+ nr_p += _mm256_extract_epi32 (res, 4);
+ nr_p += _mm256_extract_epi32 (res, 5);
+ nr_p += _mm256_extract_epi32 (res, 6);
+ nr_p += _mm256_extract_epi32 (res, 7);
+
+ // Process the remaining points (at most 7)
+ nr_p += countWithinDistanceStandard(model_coefficients, threshold, i);
+ return (nr_p);
+}
+#endif
+
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelPlane<PointT>::optimizeModelCoefficients (
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->size ());
+ projected_points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
else
{
// Allocate enough space and copy the basics
- projected_points.points.resize (inliers.size ());
+ projected_points.resize (inliers.size ());
projected_points.width = inliers.size ();
projected_points.height = 1;
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_REGISTRATION_H_
#include <pcl/sample_consensus/sac_model_registration.h>
-#include <pcl/common/eigen.h>
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
Indices indices_tgt (3);
for (int i = 0; i < 3; ++i)
{
- indices_tgt[i] = correspondences_.at (samples[i]);
+ const auto it = correspondences_.find (samples[i]);
+ if (it == correspondences_.cend ())
+ {
+ PCL_ERROR ("[pcl::SampleConsensusModelRegistration::computeModelCoefficients] Element with key %i is not in map (map contains %lu elements).\n",
+ samples[i], correspondences_.size ());
+ return (false);
+ }
+ indices_tgt[i] = it->second;
}
estimateRigidTransformationSVD (*input_, samples, *target_, indices_tgt, model_coefficients);
for (std::size_t i = 0; i < inliers.size (); ++i)
{
indices_src[i] = inliers[i];
- indices_tgt[i] = correspondences_.at (indices_src[i]);
+ const auto it = correspondences_.find (indices_src[i]);
+ if (it == correspondences_.cend ())
+ {
+ PCL_ERROR ("[pcl::SampleConsensusModelRegistration::optimizeModelCoefficients] Element with key %i is not in map (map contains %lu elements).\n",
+ indices_src[i], correspondences_.size ());
+ optimized_coefficients = model_coefficients;
+ return;
+ }
+ indices_tgt[i] = it->second;
}
estimateRigidTransformationSVD (*input_, indices_src, *target_, indices_tgt, optimized_coefficients);
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_REGISTRATION_2D_HPP_
#include <pcl/sample_consensus/sac_model_registration_2d.h>
-#include <pcl/common/eigen.h>
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
#ifndef PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_SPHERE_H_
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_SPHERE_H_
-#include <pcl/sample_consensus/eigen.h>
+#include <unsupported/Eigen/NonLinearOptimization> // for LevenbergMarquardt
#include <pcl/sample_consensus/sac_model_sphere.h>
//////////////////////////////////////////////////////////////////////////
model_coefficients[1] * model_coefficients[1] +
model_coefficients[2] * model_coefficients[2] - m15 / m11);
+ PCL_DEBUG ("[pcl::SampleConsensusModelSphere::computeModelCoefficients] Model is (%g,%g,%g,%g)\n",
+ model_coefficients[0], model_coefficients[1], model_coefficients[2], model_coefficients[3]);
return (true);
}
+#define AT(POS) ((*input_)[(*indices_)[(POS)]])
+
+#ifdef __AVX__
+// This function computes the squared distances (i.e. the distances without the square root) of 8 points to the center of the sphere
+template <typename PointT> inline __m256 pcl::SampleConsensusModelSphere<PointT>::sqr_dist8 (const std::size_t i, const __m256 a_vec, const __m256 b_vec, const __m256 c_vec) const
+{
+ const __m256 tmp1 = _mm256_sub_ps (_mm256_set_ps (AT(i ).x, AT(i+1).x, AT(i+2).x, AT(i+3).x, AT(i+4).x, AT(i+5).x, AT(i+6).x, AT(i+7).x), a_vec);
+ const __m256 tmp2 = _mm256_sub_ps (_mm256_set_ps (AT(i ).y, AT(i+1).y, AT(i+2).y, AT(i+3).y, AT(i+4).y, AT(i+5).y, AT(i+6).y, AT(i+7).y), b_vec);
+ const __m256 tmp3 = _mm256_sub_ps (_mm256_set_ps (AT(i ).z, AT(i+1).z, AT(i+2).z, AT(i+3).z, AT(i+4).z, AT(i+5).z, AT(i+6).z, AT(i+7).z), c_vec);
+ return _mm256_add_ps (_mm256_add_ps (_mm256_mul_ps (tmp1, tmp1), _mm256_mul_ps (tmp2, tmp2)), _mm256_mul_ps(tmp3, tmp3));
+}
+#endif // ifdef __AVX__
+
+#ifdef __SSE__
+// This function computes the squared distances (i.e. the distances without the square root) of 4 points to the center of the sphere
+template <typename PointT> inline __m128 pcl::SampleConsensusModelSphere<PointT>::sqr_dist4 (const std::size_t i, const __m128 a_vec, const __m128 b_vec, const __m128 c_vec) const
+{
+ const __m128 tmp1 = _mm_sub_ps (_mm_set_ps (AT(i ).x, AT(i+1).x, AT(i+2).x, AT(i+3).x), a_vec);
+ const __m128 tmp2 = _mm_sub_ps (_mm_set_ps (AT(i ).y, AT(i+1).y, AT(i+2).y, AT(i+3).y), b_vec);
+ const __m128 tmp3 = _mm_sub_ps (_mm_set_ps (AT(i ).z, AT(i+1).z, AT(i+2).z, AT(i+3).z), c_vec);
+ return _mm_add_ps (_mm_add_ps (_mm_mul_ps (tmp1, tmp1), _mm_mul_ps (tmp2, tmp2)), _mm_mul_ps(tmp3, tmp3));
+}
+#endif // ifdef __SSE__
+
+#undef AT
+
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelSphere<PointT>::getDistancesToModel (
}
distances.resize (indices_->size ());
+ const Eigen::Vector3f center (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// Iterate through the 3d points and calculate the distances from them to the sphere
for (std::size_t i = 0; i < indices_->size (); ++i)
{
// Calculate the distance from the point to the sphere as the difference between
//dist(point,sphere_origin) and sphere_radius
- distances[i] = std::abs (std::sqrt (
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
-
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) +
-
- ( (*input_)[(*indices_)[i]].z - model_coefficients[2] ) *
- ( (*input_)[(*indices_)[i]].z - model_coefficients[2] )
- ) - model_coefficients[3]);
+ distances[i] = std::abs (((*input_)[(*indices_)[i]].getVector3fMap () - center).norm () - model_coefficients[3]);
}
}
inliers.reserve (indices_->size ());
error_sqr_dists_.reserve (indices_->size ());
+ const float sqr_inner_radius = (model_coefficients[3] <= threshold ? 0.0f : (model_coefficients[3] - threshold) * (model_coefficients[3] - threshold));
+ const float sqr_outer_radius = (model_coefficients[3] + threshold) * (model_coefficients[3] + threshold);
+ const Eigen::Vector3f center (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// Iterate through the 3d points and calculate the distances from them to the sphere
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- double distance = std::abs (std::sqrt (
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
-
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) +
-
- ( (*input_)[(*indices_)[i]].z - model_coefficients[2] ) *
- ( (*input_)[(*indices_)[i]].z - model_coefficients[2] )
- ) - model_coefficients[3]);
- // Calculate the distance from the point to the sphere as the difference between
- // dist(point,sphere_origin) and sphere_radius
- if (distance < threshold)
+ // To avoid sqrt computation: consider one larger sphere (radius + threshold) and one smaller sphere (radius - threshold).
+ // Valid if point is in larger sphere, but not in smaller sphere.
+ const float sqr_dist = ((*input_)[(*indices_)[i]].getVector3fMap () - center).squaredNorm ();
+ if ((sqr_dist <= sqr_outer_radius) && (sqr_dist >= sqr_inner_radius))
{
// Returns the indices of the points whose distances are smaller than the threshold
inliers.push_back ((*indices_)[i]);
- error_sqr_dists_.push_back (static_cast<double> (distance));
+ // Only compute exact distance if necessary (if point is inlier)
+ error_sqr_dists_.push_back (static_cast<double> (std::abs (std::sqrt (sqr_dist) - model_coefficients[3])));
}
}
}
if (!isModelValid (model_coefficients))
return (0);
- std::size_t nr_p = 0;
+#if defined (__AVX__) && defined (__AVX2__)
+ return countWithinDistanceAVX (model_coefficients, threshold);
+#elif defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ return countWithinDistanceSSE (model_coefficients, threshold);
+#else
+ return countWithinDistanceStandard (model_coefficients, threshold);
+#endif
+}
+//////////////////////////////////////////////////////////////////////////
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelSphere<PointT>::countWithinDistanceStandard (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const float sqr_inner_radius = (model_coefficients[3] <= threshold ? 0.0f : (model_coefficients[3] - threshold) * (model_coefficients[3] - threshold));
+ const float sqr_outer_radius = (model_coefficients[3] + threshold) * (model_coefficients[3] + threshold);
+ const Eigen::Vector3f center (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// Iterate through the 3d points and calculate the distances from them to the sphere
- for (std::size_t i = 0; i < indices_->size (); ++i)
+ for (; i < indices_->size (); ++i)
{
- // Calculate the distance from the point to the sphere as the difference between
- // dist(point,sphere_origin) and sphere_radius
- if (std::abs (std::sqrt (
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
- ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
+ // To avoid sqrt computation: consider one larger sphere (radius + threshold) and one smaller sphere (radius - threshold).
+ // Valid if point is in larger sphere, but not in smaller sphere.
+ const float sqr_dist = ((*input_)[(*indices_)[i]].getVector3fMap () - center).squaredNorm ();
+ if ((sqr_dist <= sqr_outer_radius) && (sqr_dist >= sqr_inner_radius))
+ nr_p++;
+ }
+ return (nr_p);
+}
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
- ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) +
+//////////////////////////////////////////////////////////////////////////
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelSphere<PointT>::countWithinDistanceSSE (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const __m128 a_vec = _mm_set1_ps (model_coefficients[0]);
+ const __m128 b_vec = _mm_set1_ps (model_coefficients[1]);
+ const __m128 c_vec = _mm_set1_ps (model_coefficients[2]);
+ // To avoid sqrt computation: consider one larger sphere (radius + threshold) and one smaller sphere (radius - threshold). Valid if point is in larger sphere, but not in smaller sphere.
+ const __m128 sqr_inner_radius = _mm_set1_ps ((model_coefficients[3] <= threshold ? 0.0 : (model_coefficients[3]-threshold)*(model_coefficients[3]-threshold)));
+ const __m128 sqr_outer_radius = _mm_set1_ps ((model_coefficients[3]+threshold)*(model_coefficients[3]+threshold));
+ __m128i res = _mm_set1_epi32(0); // This corresponds to nr_p: 4 32bit integers that, summed together, hold the number of inliers
+ for (; (i + 4) <= indices_->size (); i += 4)
+ {
+ const __m128 sqr_dist = sqr_dist4 (i, a_vec, b_vec, c_vec);
+ const __m128 mask = _mm_and_ps (_mm_cmplt_ps (sqr_inner_radius, sqr_dist), _mm_cmplt_ps (sqr_dist, sqr_outer_radius)); // The mask contains 1 bits if the corresponding points are inliers, else 0 bits
+ res = _mm_add_epi32 (res, _mm_and_si128 (_mm_set1_epi32 (1), _mm_castps_si128 (mask))); // The latter part creates a vector with ones (as 32bit integers) where the points are inliers
+ //const int res = _mm_movemask_ps (mask);
+ //if (res & 1) nr_p++;
+ //if (res & 2) nr_p++;
+ //if (res & 4) nr_p++;
+ //if (res & 8) nr_p++;
+ }
+ nr_p += _mm_extract_epi32 (res, 0);
+ nr_p += _mm_extract_epi32 (res, 1);
+ nr_p += _mm_extract_epi32 (res, 2);
+ nr_p += _mm_extract_epi32 (res, 3);
- ( (*input_)[(*indices_)[i]].z - model_coefficients[2] ) *
- ( (*input_)[(*indices_)[i]].z - model_coefficients[2] )
- ) - model_coefficients[3]) < threshold)
- nr_p++;
+ // Process the remaining points (at most 3)
+ nr_p += countWithinDistanceStandard (model_coefficients, threshold, i);
+ return (nr_p);
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////
+#if defined (__AVX__) && defined (__AVX2__)
+template <typename PointT> std::size_t
+pcl::SampleConsensusModelSphere<PointT>::countWithinDistanceAVX (
+ const Eigen::VectorXf &model_coefficients, const double threshold, std::size_t i) const
+{
+ std::size_t nr_p = 0;
+ const __m256 a_vec = _mm256_set1_ps (model_coefficients[0]);
+ const __m256 b_vec = _mm256_set1_ps (model_coefficients[1]);
+ const __m256 c_vec = _mm256_set1_ps (model_coefficients[2]);
+ // To avoid sqrt computation: consider one larger sphere (radius + threshold) and one smaller sphere (radius - threshold). Valid if point is in larger sphere, but not in smaller sphere.
+ const __m256 sqr_inner_radius = _mm256_set1_ps ((model_coefficients[3] <= threshold ? 0.0 : (model_coefficients[3]-threshold)*(model_coefficients[3]-threshold)));
+ const __m256 sqr_outer_radius = _mm256_set1_ps ((model_coefficients[3]+threshold)*(model_coefficients[3]+threshold));
+ __m256i res = _mm256_set1_epi32(0); // This corresponds to nr_p: 8 32bit integers that, summed together, hold the number of inliers
+ for (; (i + 8) <= indices_->size (); i += 8)
+ {
+ const __m256 sqr_dist = sqr_dist8 (i, a_vec, b_vec, c_vec);
+ const __m256 mask = _mm256_and_ps (_mm256_cmp_ps (sqr_inner_radius, sqr_dist, _CMP_LT_OQ), _mm256_cmp_ps (sqr_dist, sqr_outer_radius, _CMP_LT_OQ)); // The mask contains 1 bits if the corresponding points are inliers, else 0 bits
+ res = _mm256_add_epi32 (res, _mm256_and_si256 (_mm256_set1_epi32 (1), _mm256_castps_si256 (mask))); // The latter part creates a vector with ones (as 32bit integers) where the points are inliers
+ //const int res = _mm256_movemask_ps (mask);
+ //if (res & 1) nr_p++;
+ //if (res & 2) nr_p++;
+ //if (res & 4) nr_p++;
+ //if (res & 8) nr_p++;
+ //if (res & 16) nr_p++;
+ //if (res & 32) nr_p++;
+ //if (res & 64) nr_p++;
+ //if (res & 128) nr_p++;
}
+ nr_p += _mm256_extract_epi32 (res, 0);
+ nr_p += _mm256_extract_epi32 (res, 1);
+ nr_p += _mm256_extract_epi32 (res, 2);
+ nr_p += _mm256_extract_epi32 (res, 3);
+ nr_p += _mm256_extract_epi32 (res, 4);
+ nr_p += _mm256_extract_epi32 (res, 5);
+ nr_p += _mm256_extract_epi32 (res, 6);
+ nr_p += _mm256_extract_epi32 (res, 7);
+
+ // Process the remaining points (at most 7)
+ nr_p += countWithinDistanceStandard (model_coefficients, threshold, i);
return (nr_p);
}
+#endif
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
}
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->size ());
+ projected_points.resize (input_->size ());
projected_points.header = input_->header;
projected_points.width = input_->width;
projected_points.height = input_->height;
return (false);
}
+ const float sqr_inner_radius = (model_coefficients[3] <= threshold ? 0.0f : (model_coefficients[3] - threshold) * (model_coefficients[3] - threshold));
+ const float sqr_outer_radius = (model_coefficients[3] + threshold) * (model_coefficients[3] + threshold);
+ const Eigen::Vector3f center (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
for (const auto &index : indices)
{
- // Calculate the distance from the point to the sphere as the difference between
- //dist(point,sphere_origin) and sphere_radius
- if (std::abs (sqrt (
- ( (*input_)[index].x - model_coefficients[0] ) *
- ( (*input_)[index].x - model_coefficients[0] ) +
- ( (*input_)[index].y - model_coefficients[1] ) *
- ( (*input_)[index].y - model_coefficients[1] ) +
- ( (*input_)[index].z - model_coefficients[2] ) *
- ( (*input_)[index].z - model_coefficients[2] )
- ) - model_coefficients[3]) > threshold)
+ // To avoid sqrt computation: consider one larger sphere (radius + threshold) and one smaller sphere (radius - threshold).
+ // Valid if point is in larger sphere, but not in smaller sphere.
+ const float sqr_dist = ((*input_)[index].getVector3fMap () - center).squaredNorm ();
+ if ((sqr_dist > sqr_outer_radius) || (sqr_dist < sqr_inner_radius))
{
return (false);
}
#include <pcl/sample_consensus/sac_model_stick.h>
#include <pcl/common/centroid.h>
#include <pcl/common/concatenate.h>
+#include <pcl/common/eigen.h> // for eigen33
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
// model_coefficients.template segment<3> (3).normalize ();
// We don't care about model_coefficients[6] which is the width (radius) of the stick
+ PCL_DEBUG ("[pcl::SampleConsensusModelStick::computeModelCoefficients] Model is (%g,%g,%g,%g,%g,%g).\n",
+ model_coefficients[0], model_coefficients[1], model_coefficients[2],
+ model_coefficients[3], model_coefficients[4], model_coefficients[5]);
return (true);
}
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->size ());
+ projected_points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
else
{
// Allocate enough space and copy the basics
- projected_points.points.resize (inliers.size ());
+ projected_points.resize (inliers.size ());
projected_points.width = inliers.size ();
projected_points.height = 1;
* is a RANSAC-like model-fitting algorithm that can tolerate up to 50% outliers without requiring thresholds to be
* set. See Andrea Fusiello's "Elements of Geometric Computer Vision"
* (http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/FUSIELLO4/tutorial.html#x1-520007) for more details.
+ * In contrast to RANSAC, LMedS does not divide the points into inliers and outliers when finding the model. Instead,
+ * it uses the median of all point-model distances as the measure of how good a model is. A threshold is only needed
+ * at the end, when it is determined which points belong to the found model.
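+ *
+ * A rough sketch of the quality measure (illustrative only; sac_model and model_coefficients are placeholders,
+ * getDistancesToModel is the model's per-point distance routine):
+ * \code
+ * std::vector<double> distances;
+ * sac_model->getDistancesToModel (model_coefficients, distances); // one distance per point
+ * std::nth_element (distances.begin (), distances.begin () + distances.size () / 2, distances.end ());
+ * const double quality = distances[distances.size () / 2]; // lower median distance = better model
+ * \endcode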
* \author Radu B. Rusu
* \ingroup sample_consensus
*/
#pragma once
-#include <algorithm>
#include <pcl/sample_consensus/sac.h>
#include <pcl/sample_consensus/sac_model.h>
/** \brief @b MEstimatorSampleConsensus represents an implementation of the MSAC (M-estimator SAmple Consensus)
* algorithm, as described in: "MLESAC: A new robust estimator with application to estimating image geometry", P.H.S.
* Torr and A. Zisserman, Computer Vision and Image Understanding, vol 78, 2000.
+ * The difference to RANSAC is how the quality of a model is computed: RANSAC counts the number of inliers, given a
+ * threshold. The more inliers, the better the model is - it does not matter how close the inliers actually are to
+ * the model, as long as they are within the threshold. MSAC changes this by using the sum of all point-model distances
+ * as the quality measure; however, outliers add only the threshold instead of their true distance. This method can lead
+ * to better results compared to RANSAC.
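+ *
+ * A rough sketch of the quality measure (illustrative only; distances and threshold are assumed to be available,
+ * and lower quality values are better):
+ * \code
+ * double quality = 0.0;
+ * for (const double dist : distances)
+ *   quality += std::min (dist, threshold); // an outlier adds only the threshold, not its full distance
+ * \endcode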
* \author Radu B. Rusu
* \ingroup sample_consensus
*/
namespace pcl
{
- /** \brief @b RandomSampleConsensus represents an implementation of the RANSAC (RAndom SAmple Consensus) algorithm, as
+ /** \brief @b ProgressiveSampleConsensus represents an implementation of the PROSAC (PROgressive SAmple Consensus) algorithm, as
* described in: "Matching with PROSAC – Progressive Sample Consensus", Chum, O. and Matas, J.G., CVPR, I: 220-226
* 2005.
* \author Vincent Rabaud
* described in: "Random Sample Consensus: A Paradigm for Model Fitting with Applications to Image Analysis and
* Automated Cartography", Martin A. Fischler and Robert C. Bolles, Comm. Of the ACM 24: 381–395, June 1981.
* A parallel variant is available, enable with setNumberOfThreads. Default is non-parallel.
+ *
+ * The algorithm works as follows:
+ * <ol>
+ * <li> randomly select samples from the cloud, just as many as needed to determine a model
+ * <li> compute the coefficients of the model from the samples
+ * <li> count how many points of the cloud belong to the model, given a threshold. These are called inliers
+ * <li> repeat until a good model has been found or a max number of iterations has been reached
+ * <li> return the model with the most inliers
+ * </ol>
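+ *
+ * A minimal usage sketch (the cloud pointer, the plane model type, and the 0.01 threshold are placeholders):
+ * \code
+ * pcl::SampleConsensusModelPlane<pcl::PointXYZ>::Ptr
+ *   model (new pcl::SampleConsensusModelPlane<pcl::PointXYZ> (cloud));
+ * pcl::RandomSampleConsensus<pcl::PointXYZ> ransac (model, 0.01); // 0.01 is the distance-to-model threshold
+ * ransac.computeModel ();
+ * pcl::Indices inliers;
+ * ransac.getInliers (inliers);
+ * \endcode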
* \author Radu B. Rusu
* \ingroup sample_consensus
*/
using SampleConsensus<PointT>::probability_;
using SampleConsensus<PointT>::threads_;
- /** \brief RANSAC (RAndom SAmple Consensus) main constructor
+ /** \brief RANSAC (RANdom SAmple Consensus) main constructor
* \param[in] model a Sample Consensus model
*/
RandomSampleConsensus (const SampleConsensusModelPtr &model)
max_iterations_ = 10000;
}
- /** \brief RANSAC (RAndom SAmple Consensus) main constructor
+ /** \brief RANSAC (RANdom SAmple Consensus) main constructor
* \param[in] model a Sample Consensus model
* \param[in] threshold distance to model threshold
*/
#pragma once
-#include <algorithm>
#include <pcl/sample_consensus/sac.h>
#include <pcl/sample_consensus/sac_model.h>
namespace pcl
{
- /** \brief @b RandomizedRandomSampleConsensus represents an implementation of the RRANSAC (Randomized RAndom SAmple
+ /** \brief @b RandomizedRandomSampleConsensus represents an implementation of the RRANSAC (Randomized RANdom SAmple
* Consensus), as described in "Randomized RANSAC with Td,d test", O. Chum and J. Matas, Proc. British Machine Vision
* Conf. (BMVC '02), vol. 2, BMVA, pp. 448-457, 2002.
+ *
+ * The algorithm works similarly to RANSAC, with one addition: after computing the model coefficients, randomly select a fraction
+ * of points. If any of these points do not belong to the model (given a threshold), continue with the next iteration instead
+ * of checking all points. This may speed up finding the model if the fraction of points to pre-test is chosen well.
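+ *
+ * A rough sketch of the pre-test (pseudocode; randomSubset, fraction and pointToModelDistance are placeholders,
+ * not actual members):
+ * \code
+ * sac_model->computeModelCoefficients (samples, coefficients);
+ * bool passed_pretest = true;
+ * for (const auto &index : randomSubset (fraction))
+ *   if (pointToModelDistance (index, coefficients) > threshold)
+ *   { passed_pretest = false; break; }
+ * if (!passed_pretest)
+ *   continue; // reject the candidate model without evaluating all points
+ * \endcode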
* \note RRANSAC is useful in situations where most of the data samples belong to the model, and a fast outlier rejection algorithm is needed.
* \author Radu B. Rusu
* \ingroup sample_consensus
using SampleConsensus<PointT>::inliers_;
using SampleConsensus<PointT>::probability_;
- /** \brief RANSAC (Randomized RAndom SAmple Consensus) main constructor
+ /** \brief RRANSAC (Randomized RANdom SAmple Consensus) main constructor
* \param[in] model a Sample Consensus model
*/
RandomizedRandomSampleConsensus (const SampleConsensusModelPtr &model)
max_iterations_ = 10000;
}
- /** \brief RRANSAC (RAndom SAmple Consensus) main constructor
+ /** \brief RRANSAC (Randomized RANdom SAmple Consensus) main constructor
* \param[in] model a Sample Consensus model
* \param[in] threshold distance to model threshold
*/
#pragma once
-#include <pcl/sample_consensus/boost.h>
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/pcl_base.h>
+#include <boost/random/mersenne_twister.hpp> // for mt19937
+#include <boost/random/uniform_01.hpp> // for uniform_01
+
#include <ctime>
#include <memory>
#include <set>
#pragma once
-#include <cfloat>
#include <ctime>
#include <climits>
#include <memory>
#include <set>
+#include <boost/random/mersenne_twister.hpp> // for mt19937
+#include <boost/random/uniform_int.hpp> // for uniform_int
+#include <boost/random/variate_generator.hpp> // for variate_generator
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
-#include <pcl/pcl_base.h>
#include <pcl/console/print.h>
#include <pcl/point_cloud.h>
#include <pcl/types.h> // for index_t, Indices
-#include <pcl/sample_consensus/boost.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/search/search.h>
, samples_radius_ (0.)
, samples_radius_search_ ()
, rng_dist_ (new boost::uniform_int<> (0, std::numeric_limits<int>::max ()))
+ , custom_model_constraints_ ([](auto){return true;})
{
// Create a random number generator object
if (random)
, samples_radius_ (0.)
, samples_radius_search_ ()
, rng_dist_ (new boost::uniform_int<> (0, std::numeric_limits<int>::max ()))
+ , custom_model_constraints_ ([](auto){return true;})
{
if (random)
rng_alg_.seed (static_cast<unsigned> (std::time (nullptr)));
, samples_radius_ (0.)
, samples_radius_search_ ()
, rng_dist_ (new boost::uniform_int<> (0, std::numeric_limits<int>::max ()))
+ , custom_model_constraints_ ([](auto){return true;})
{
if (random)
rng_alg_.seed (static_cast<unsigned> (std::time(nullptr)));
max_radius = radius_max_;
}
+ /** \brief This can be used to impose any kind of constraint on the model,
+ * e.g. that it has a specific direction, size, or anything else.
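+ *
+ * A minimal usage sketch (assumes a sphere model, where coefficient 3 is the radius; the 0.1 limit is arbitrary):
+ * \code
+ * model->setModelConstraints ([](const Eigen::VectorXf &coeffs) { return coeffs[3] < 0.1f; });
+ * \endcode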
+ * \param[in] function A function that takes the model coefficients and returns whether the model is acceptable or not.
+ */
+ inline void
+ setModelConstraints (std::function<bool(const Eigen::VectorXf &)> function)
+ {
+ if (!function)
+ {
+ PCL_ERROR ("[pcl::SampleConsensusModel::setModelConstraints] The given function is empty (i.e. does not contain a callable target)!\n");
+ return;
+ }
+ custom_model_constraints_ = std::move (function);
+ }
+
/** \brief Set the maximum distance allowed when drawing random samples
* \param[in] radius the maximum distance (L2 norm)
* \param search
PCL_ERROR ("[pcl::%s::isModelValid] Invalid number of model coefficients given (is %lu, should be %lu)!\n", getClassName ().c_str (), model_coefficients.size (), model_size_);
return (false);
}
+ if (!custom_model_constraints_(model_coefficients))
+ {
+ PCL_DEBUG ("[pcl::%s::isModelValid] The user defined isModelValid function returned false.\n", getClassName ().c_str ());
+ return (false);
+ }
return (true);
}
{
return ((*rng_gen_) ());
}
+
+ /** \brief A user defined function that takes model coefficients and returns whether the model is acceptable or not. */
+ std::function<bool(const Eigen::VectorXf &)> custom_model_constraints_;
public:
PCL_MAKE_ALIGNED_OPERATOR_NEW
};
/** \brief @b SampleConsensusModelFromNormals represents the base model class
* for models that require the use of surface normals for estimation.
+ * \ingroup sample_consensus
*/
template <typename PointT, typename PointNT>
class SampleConsensusModelFromNormals //: public SampleConsensusModel<PointT>
inline void
setNormalDistanceWeight (const double w)
{
+ if (w < 0.0 || w > 1.0)
+ {
+ PCL_ERROR ("[pcl::SampleConsensusModel::setNormalDistanceWeight] w is %g, but should be in [0; 1]. Weight will not be set.", w);
+ return;
+ }
normal_distance_weight_ = w;
}
#pragma once
+#ifdef __SSE__
+#include <xmmintrin.h> // for __m128
+#endif // ifdef __SSE__
+#ifdef __AVX__
+#include <immintrin.h> // for __m256
+#endif // ifdef __AVX__
+
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
bool
isSampleGood(const Indices &samples) const override;
+ /** This implementation uses no SIMD instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceStandard (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ /** This implementation uses SSE, SSE2, and SSE4.1 instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceSSE (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+#endif
+
+#if defined (__AVX__) && defined (__AVX2__)
+ /** This implementation uses AVX and AVX2 instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceAVX (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+#endif
+
private:
/** \brief Functor for the optimization function */
struct OptimizationFunctor : pcl::Functor<float>
const pcl::SampleConsensusModelCircle2D<PointT> *model_;
const Indices &indices_;
};
+
+#ifdef __AVX__
+ inline __m256 sqr_dist8 (const std::size_t i, const __m256 a_vec, const __m256 b_vec) const;
+#endif
+
+#ifdef __SSE__
+ inline __m128 sqr_dist4 (const std::size_t i, const __m128 a_vec, const __m128 b_vec) const;
+#endif
};
}
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/common/common.h>
#include <pcl/common/distances.h>
-#include <climits>
namespace pcl
{
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/common/common.h>
#include <pcl/common/distances.h>
namespace pcl
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/common/eigen.h>
namespace pcl
{
protected:
using SampleConsensusModel<PointT>::sample_size_;
using SampleConsensusModel<PointT>::model_size_;
+
+ /** This implementation uses no SIMD instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceStandard (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ /** This implementation uses SSE, SSE2, and SSE4.1 instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceSSE (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+#endif
+
+#if defined (__AVX__) && defined (__AVX2__)
+ /** This implementation uses AVX and AVX2 instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceAVX (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+#endif
};
}
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/sac_model_sphere.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/common/common.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#pragma once
#include <pcl/sample_consensus/sac_model_plane.h>
-#include <pcl/common/common.h>
namespace pcl
{
#pragma once
#include <pcl/sample_consensus/sac_model_plane.h>
-#include <pcl/common/common.h>
namespace pcl
{
#pragma once
+#ifdef __SSE__
+#include <xmmintrin.h> // for __m128
+#endif // ifdef __SSE__
+#ifdef __AVX__
+#include <immintrin.h> // for __m256
+#endif // ifdef __AVX__
+
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
using SampleConsensusModel<PointT>::sample_size_;
using SampleConsensusModel<PointT>::model_size_;
+ /** This implementation uses no SIMD instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceStandard (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ /** This implementation uses SSE, SSE2, and SSE4.1 instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceSSE (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+#endif
+
+#if defined (__AVX__) && defined (__AVX2__)
+ /** This implementation uses AVX and AVX2 instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceAVX (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+#endif
+
+#ifdef __AVX__
+ inline __m256 dist8 (const std::size_t i, const __m256 &a_vec, const __m256 &b_vec, const __m256 &c_vec, const __m256 &d_vec, const __m256 &abs_help) const;
+#endif
+
+#ifdef __SSE__
+ inline __m128 dist4 (const std::size_t i, const __m128 &a_vec, const __m128 &b_vec, const __m128 &c_vec, const __m128 &d_vec, const __m128 &abs_help) const;
+#endif
+
private:
/** \brief Check if a sample of indices results in a good sample of points
* indices.
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/pcl_base.h>
-#include <pcl/sample_consensus/eigen.h>
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/common/eigen.h>
#include <pcl/common/centroid.h>
#include <map>
+#include <numeric> // for std::iota
namespace pcl
{
setInputTarget (const PointCloudConstPtr &target)
{
target_ = target;
- indices_tgt_.reset (new Indices);
// Cache the size and fill the target indices
- int target_size = static_cast<int> (target->size ());
- indices_tgt_->resize (target_size);
-
- for (int i = 0; i < target_size; ++i)
- (*indices_tgt_)[i] = i;
+ const index_t target_size = static_cast<index_t> (target->size ());
+ indices_tgt_.reset (new Indices (target_size));
+ std::iota (indices_tgt_->begin (), indices_tgt_->end (), 0);
computeOriginalIndexMapping ();
}
void
computeOriginalIndexMapping ()
{
- if (!indices_tgt_ || !indices_ || indices_->empty () || indices_->size () != indices_tgt_->size ())
+ if (!indices_tgt_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelRegistration::computeOriginalIndexMapping] Cannot compute mapping: indices_tgt_ is null.\n");
+ return;
+ }
+ if (!indices_)
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelRegistration::computeOriginalIndexMapping] Cannot compute mapping: indices_ is null.\n");
+ return;
+ }
+ if (indices_->empty ())
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelRegistration::computeOriginalIndexMapping] Cannot compute mapping: indices_ is empty.\n");
+ return;
+ }
+ if (indices_->size () != indices_tgt_->size ())
+ {
+ PCL_DEBUG ("[pcl::SampleConsensusModelRegistration::computeOriginalIndexMapping] Cannot compute mapping: indices_ and indices_tgt_ are not the same size (%zu vs %zu).\n",
+ indices_->size (), indices_tgt_->size ());
return;
+ }
for (std::size_t i = 0; i < indices_->size (); ++i)
correspondences_[(*indices_)[i]] = (*indices_tgt_)[i];
+ PCL_DEBUG ("[pcl::SampleConsensusModelRegistration::computeOriginalIndexMapping] Successfully computed mapping.\n");
}
/** \brief A boost shared pointer to the target point cloud data array. */
IndicesPtr indices_tgt_;
/** \brief Given the index in the original point cloud, give the matching original index in the target cloud */
- std::map<int, int> correspondences_;
+ std::map<index_t, index_t> correspondences_;
/** \brief Internal distance threshold used for the sample selection step. */
double sample_dist_thresh_;
#pragma once
+#ifdef __SSE__
+#include <xmmintrin.h> // for __m128
+#endif // ifdef __SSE__
+#ifdef __AVX__
+#include <immintrin.h> // for __m256
+#endif // ifdef __AVX__
+
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
bool
isSampleGood(const Indices &samples) const override;
+ /** This implementation uses no SIMD instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceStandard (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ /** This implementation uses SSE, SSE2, and SSE4.1 instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceSSE (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+#endif
+
+#if defined (__AVX__) && defined (__AVX2__)
+ /** This implementation uses AVX and AVX2 instructions. It is not intended for normal use.
+ * See countWithinDistance which automatically uses the fastest implementation.
+ */
+ std::size_t
+ countWithinDistanceAVX (const Eigen::VectorXf &model_coefficients,
+ const double threshold,
+ std::size_t i = 0) const;
+#endif
+
private:
struct OptimizationFunctor : pcl::Functor<float>
{
const pcl::SampleConsensusModelSphere<PointT> *model_;
const Indices &indices_;
};
+
+#ifdef __AVX__
+ inline __m256 sqr_dist8 (const std::size_t i, const __m256 a_vec, const __m256 b_vec, const __m256 c_vec) const;
+#endif
+
+#ifdef __SSE__
+ inline __m128 sqr_dist4 (const std::size_t i, const __m128 a_vec, const __m128 b_vec, const __m128 c_vec) const;
+#endif
};
}
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/common/eigen.h>
namespace pcl
{
\section secSampleConsensusPresentation Overview
The <b>pcl_sample_consensus</b> library holds SAmple Consensus (SAC) methods like
- <a href="http://en.wikipedia.org/wiki/RANSAC">RANSAC</a> and models like planes and cylinders. These can
+ <a href="http://en.wikipedia.org/wiki/RANSAC">RANSAC</a> and models like planes and cylinders. These can be
combined freely in order to detect specific models and their parameters in point clouds.
Some of the models implemented in this library include: lines, planes, cylinders, and spheres. Plane fitting
The following list describes the robust sample consensus estimators implemented:
<ul>
<li><a href="http://en.wikipedia.org/wiki/RANSAC">SAC_RANSAC</a> - RANdom SAmple Consensus</li>
- <li><a href="http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node25.html">SAC_LMEDS</a> - Least Median of Squares</li>
- <li><a href="http://www.robots.ox.ac.uk/~vgg/publications-new/Public/2000/Torr00/torr00.pdf">SAC_MSAC</a> - M-Estimator SAmple Consensus</li>
- <li><a href="http://cmp.felk.cvut.cz/~matas/papers/presentations/rransac-cvww02_pres.pdf">SAC_RRANSAC</a> - Randomized RANSAC</li>
- <li><a href="http://cmp.felk.cvut.cz/~matas/papers/presentations/rransac-cvww02_pres.pdf">SAC_RMSAC</a> - Randomized MSAC</li>
- <li><a href="http://www.robots.ox.ac.uk/~vgg/publications-new/Public/2000/Torr00/torr00.pdf">SAC_MLESAC</a> - Maximum LikeLihood Estimation SAmple Consensus</li>
+ <li><a href="https://www-sop.inria.fr/odyssee/software/old_robotvis/Tutorial-Estim/node25.html">SAC_LMEDS</a> - Least Median of Squares</li>
+ <li><a href="https://www.robots.ox.ac.uk/~vgg/publications/2000/Torr00/torr00.pdf">SAC_MSAC</a> - M-Estimator SAmple Consensus</li>
+ <li><a href="https://web.archive.org/web/20170811194151/http://www.bmva.org/bmvc/2002/papers/50/full_50.pdf">SAC_RRANSAC</a> - Randomized RANSAC</li>
+ <li><a href="https://web.archive.org/web/20170811194151/http://www.bmva.org/bmvc/2002/papers/50/full_50.pdf">SAC_RMSAC</a> - Randomized MSAC</li>
+ <li><a href="https://www.robots.ox.ac.uk/~vgg/publications/2000/Torr00/torr00.pdf">SAC_MLESAC</a> - Maximum LikeLihood Estimation SAmple Consensus</li>
<li><a href="http://cmp.felk.cvut.cz/~matas/papers/chum-prosac-cvpr05.pdf">SAC_PROSAC</a> - PROgressive SAmple Consensus</li>
</ul>
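As a minimal usage sketch of this module (assuming an already filled pcl::PointCloud<pcl::PointXYZ>::Ptr named cloud), plane fitting with SAC_RANSAC typically looks like this:

#include <pcl/point_types.h>
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_plane.h>

// `cloud` is assumed to exist: pcl::PointCloud<pcl::PointXYZ>::Ptr
pcl::SampleConsensusModelPlane<pcl::PointXYZ>::Ptr
  model (new pcl::SampleConsensusModelPlane<pcl::PointXYZ> (cloud));
pcl::RandomSampleConsensus<pcl::PointXYZ> ransac (model);
ransac.setDistanceThreshold (0.01);          // inlier threshold in meters
ransac.computeModel ();
pcl::Indices inliers;
ransac.getInliers (inliers);                 // indices of points on the plane
Eigen::VectorXf coefficients;
ransac.getModelCoefficients (coefficients);  // ax + by + cz + d = 0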
#include <flann/algorithms/kmeans_index.h>
#include <pcl/search/flann_search.h>
+#include <pcl/kdtree/kdtree_flann.h> // for radius_search, knn_search
+// @TODO: remove once constexpr makes it easy to have the function in the header only
+#include <pcl/kdtree/impl/kdtree_flann.hpp>
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename FlannDistance>
indices.resize (k,-1);
if (dists.size() != static_cast<unsigned int> (k))
dists.resize (k);
- flann::Matrix<index_t> i (&indices[0],1,k);
flann::Matrix<float> d (&dists[0],1,k);
- int result = index_->knnSearch (m,i,d,k, p);
+ int result = knn_search(*index_, m, indices, d, k, p);
delete [] data;
p.sorted = sorted_results_;
p.eps = eps_;
p.checks = checks_;
- index_->knnSearch (m,k_indices,k_sqr_distances,k, p);
+ knn_search(*index_, m, k_indices, k_sqr_distances, k, p);
delete [] data;
}
p.sorted = sorted_results_;
p.eps = eps_;
p.checks = checks_;
- index_->knnSearch (m,k_indices,k_sqr_distances,k, p);
+ knn_search(*index_, m, k_indices, k_sqr_distances, k, p);
delete[] data;
}
p.checks = checks_;
std::vector<Indices> i (1);
std::vector<std::vector<float> > d (1);
- int result = index_->radiusSearch (m,i,d,static_cast<float> (radius * radius), p);
+ int result = radius_search(*index_, m, i, d, static_cast<float>(radius * radius), p);
delete [] data;
indices = i [0];
p.checks = checks_;
// here: max_nn==0: take all neighbors. flann: max_nn==0: return no neighbors, only count them. max_nn==-1: return all neighbors
p.max_neighbors = max_nn != 0 ? max_nn : -1;
- index_->radiusSearch (m,k_indices,k_sqr_distances,static_cast<float> (radius * radius), p);
+ radius_search(
+ *index_, m, k_indices, k_sqr_distances, static_cast<float>(radius * radius), p);
delete [] data;
}
p.checks = checks_;
// here: max_nn==0: take all neighbors. flann: max_nn==0: return no neighbors, only count them. max_nn==-1: return all neighbors
p.max_neighbors = max_nn != 0 ? max_nn : -1;
- index_->radiusSearch (m, k_indices, k_sqr_distances, static_cast<float> (radius * radius), p);
+ radius_search(
+ *index_, m, k_indices, k_sqr_distances, static_cast<float>(radius * radius), p);
delete[] data;
}
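The knn_search / radius_search helpers above are internal plumbing; from user code the same searches go through the pcl::search::Search interface. A minimal sketch, assuming a filled pcl::PointCloud<pcl::PointXYZ>::Ptr named cloud:

#include <vector>
#include <pcl/point_types.h>
#include <pcl/search/flann_search.h>

// `cloud` is assumed to exist: pcl::PointCloud<pcl::PointXYZ>::Ptr
pcl::search::FlannSearch<pcl::PointXYZ> search;
search.setInputCloud (cloud);
pcl::Indices k_indices;
std::vector<float> k_sqr_distances;
search.nearestKSearch ((*cloud)[0], 10, k_indices, k_sqr_distances);  // 10 nearest neighbors
search.radiusSearch ((*cloud)[0], 0.1, k_indices, k_sqr_distances);   // neighbors within 10 cm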
#pragma once
#include <pcl/search/organized.h>
-#include <pcl/common/eigen.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
-#include <pcl/common/time.h>
+#include <pcl/common/projection_matrix.h> // for getCameraMatrixFromProjectionMatrix, ...
#include <Eigen/Eigenvalues>
//////////////////////////////////////////////////////////////////////////////////////////////
unsigned left, right, top, bottom;
//unsigned x, y, idx;
float squared_distance;
- double squared_radius;
+ const float squared_radius = radius * radius;
k_indices.clear ();
k_sqr_distances.clear ();
- squared_radius = radius * radius;
-
- this->getProjectedRadiusSearchBox (query, static_cast<float> (squared_radius), left, right, top, bottom);
+ this->getProjectedRadiusSearchBox (query, squared_radius, left, right, top, bottom);
// iterate over search box
if (max_nn == 0 || max_nn >= static_cast<unsigned int> (input_->size ()))
unsigned top = 0;
unsigned bottom = input_->height - 1;
- std::priority_queue <Entry> results;
- //std::vector<Entry> k_results;
- //k_results.reserve (k);
+ std::vector <Entry> results; // sorted from smallest to largest distance
+ results.reserve (k);
// add point laying on the projection of the query point.
if (xBegin >= 0 &&
xBegin < static_cast<int> (input_->width) &&
}
// stop here means that the k-nearest neighbor changed -> recalculate bounding box of ellipse.
if (stop)
- getProjectedRadiusSearchBox (query, results.top ().distance, left, right, top, bottom);
+ getProjectedRadiusSearchBox (query, results.back ().distance, left, right, top, bottom);
}
// now we use it as stop flag -> if bounding box is completely within the already examined search box were done!
} while (!stop);
- k_indices.resize (results.size ());
- k_sqr_distances.resize (results.size ());
- std::size_t idx = results.size () - 1;
- while (!results.empty ())
+ const auto results_size = results.size ();
+ k_indices.resize (results_size);
+ k_sqr_distances.resize (results_size);
+ std::size_t idx = 0;
+ for(const auto& result : results)
{
- k_indices [idx] = results.top ().index;
- k_sqr_distances [idx] = results.top ().distance;
- results.pop ();
- --idx;
+ k_indices [idx] = result.index;
+ k_sqr_distances [idx] = result.distance;
+ ++idx;
}
- return (static_cast<int> (k_indices.size ()));
+ return (static_cast<int> (results_size));
}
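The nearestKSearch above now keeps its candidate set in a distance-sorted std::vector instead of a std::priority_queue, so the results can be copied out in order without popping. For reference, a minimal usage sketch of the organized search, assuming an organized cloud pointer named organized_cloud (height > 1):

#include <vector>
#include <pcl/point_types.h>
#include <pcl/search/organized.h>

// `organized_cloud` is assumed to exist: pcl::PointCloud<pcl::PointXYZ>::ConstPtr with height > 1
pcl::search::OrganizedNeighbor<pcl::PointXYZ> search;
search.setInputCloud (organized_cloud);
pcl::Indices k_indices;
std::vector<float> k_sqr_distances;
search.nearestKSearch ((*organized_cloud)[0], 10, k_indices, k_sqr_distances);
search.radiusSearch ((*organized_cloud)[0], 0.05, k_indices, k_sqr_distances);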
////////////////////////////////////////////////////////////////////////////////////////////
{
return (tree_->approxNearestSearch (query_index, result_index, sqr_distance));
}
-
+ /** \brief Search for points within rectangular search area
+ * \param[in] min_pt lower corner of search area
+ * \param[in] max_pt upper corner of search area
+ * \param[out] k_indices the resultant point indices
+ * \return number of points found within search area
+ */
+ inline uindex_t
+ boxSearch(const Eigen::Vector3f &min_pt, const Eigen::Vector3f &max_pt, Indices &k_indices) const
+ {
+ return (tree_->boxSearch(min_pt, max_pt, k_indices));
+ }
};
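A minimal sketch of the new boxSearch wrapper; the cloud name and the octree resolution below are assumptions, and pcl::search::Octree simply forwards to the underlying octree as shown above:

#include <pcl/point_types.h>
#include <pcl/search/octree.h>

// `cloud` is assumed to exist: pcl::PointCloud<pcl::PointXYZ>::Ptr
pcl::search::Octree<pcl::PointXYZ> octree (0.05);  // octree resolution in meters
octree.setInputCloud (cloud);
pcl::Indices in_box;
octree.boxSearch (Eigen::Vector3f (0.0f, 0.0f, 0.0f),  // lower corner of search box
                  Eigen::Vector3f (1.0f, 1.0f, 1.0f),  // upper corner of search box
                  in_box);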
}
}
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
#include <pcl/search/search.h>
#include <pcl/common/eigen.h>
#include <algorithm>
-#include <queue>
#include <vector>
-#include <pcl/common/projection_matrix.h>
namespace pcl
{
* \return whether the top element changed or not.
*/
inline bool
- testPoint (const PointT& query, unsigned k, std::priority_queue<Entry>& queue, index_t index) const
+ testPoint (const PointT& query, unsigned k, std::vector<Entry>& queue, index_t index) const
{
const PointT& point = input_->points [index];
if (mask_ [index] && std::isfinite (point.x))
float dist_y = point.y - query.y;
float dist_z = point.z - query.z;
float squared_distance = dist_x * dist_x + dist_y * dist_y + dist_z * dist_z;
- if (queue.size () < k)
+ const auto queue_size = queue.size ();
+ const auto insert_into_queue = [&]{ queue.emplace (
+ std::upper_bound (queue.begin(), queue.end(), squared_distance,
+ [](float dist, const Entry& ent){ return dist<ent.distance; }),
+ index, squared_distance); };
+ if (queue_size < k)
{
- queue.push (Entry (index, squared_distance));
- return queue.size () == k;
+ insert_into_queue ();
+ return (queue_size + 1) == k;
}
- if (queue.top ().distance > squared_distance)
+ if (queue.back ().distance > squared_distance)
{
- queue.pop ();
- queue.push (Entry (index, squared_distance));
+ queue.pop_back ();
+ insert_into_queue ();
return true; // top element has changed!
}
}
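testPoint now maintains a bounded, ascending-sorted std::vector instead of a std::priority_queue, inserting via std::upper_bound so the worst candidate is always at the back. A self-contained sketch of that technique (the names here are illustrative, not PCL API):

#include <algorithm>
#include <cstddef>
#include <vector>

struct Entry { int index; float distance; };

// Keep at most k entries, sorted by ascending distance.
void insert_bounded (std::vector<Entry>& queue, std::size_t k, const Entry& e)
{
  if (queue.size () == k)
  {
    if (queue.back ().distance <= e.distance)
      return;            // not better than the current worst entry
    queue.pop_back ();   // make room by dropping the current worst
  }
  queue.insert (std::upper_bound (queue.begin (), queue.end (), e.distance,
                                  [](float dist, const Entry& ent){ return dist < ent.distance; }),
                e);      // insert while keeping ascending order
}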
* \param[out] ground indices of points determined to be ground returns.
*/
virtual void
- extract (std::vector<int>& ground);
+ extract (Indices& ground);
protected:
*/
#pragma once
-
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#ifdef __GNUC__
#pragma GCC system_header
#endif
#include <pcl/memory.h>
#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/console/print.h> // for PCL_WARN
+#include <pcl/search/search.h> // for Search
#include <functional>
#include <pcl/segmentation/lccp_segmentation.h>
#include <pcl/sample_consensus/sac.h>
-#include <pcl/sample_consensus/sac_model_plane.h>
#include <pcl/segmentation/extract_clusters.h>
#define PCL_INSTANTIATE_CPCSegmentation(T) template class PCL_EXPORTS pcl::CPCSegmentation<T>;
// Maximum number of trials before we give up.
max_iterations_ = 10000;
use_directed_weights_ = false;
- model_pt_indices_.reset (new std::vector<int>);
- full_cloud_pt_indices_.reset (new std::vector<int> (* (sac_model_->getIndices ())));
+ model_pt_indices_.reset (new Indices);
+ full_cloud_pt_indices_.reset (new Indices (* (sac_model_->getIndices ())));
point_cloud_ptr_ = sac_model_->getInputCloud ();
}
#include <pcl/ml/densecrf.h>
#include <pcl/filters/voxel_grid.h>
-#include <pcl/filters/voxel_grid_label.h>
//#include <pcl/ml/densecrfORI.h>
#pragma once
-#include <pcl/segmentation/boost.h>
#include <pcl/segmentation/plane_coefficient_comparator.h>
namespace pcl
#pragma once
+#include <set> // for std::set
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
-#include <pcl/segmentation/boost.h>
#include <pcl/segmentation/comparator.h>
namespace pcl
{
- namespace experimental
+ /** \brief EuclideanClusterComparator is a comparator used for finding clusters based on Euclidean distance.
+ *
+ * \author Alex Trevor
+ */
+ template<typename PointT, typename PointLT = pcl::Label>
+ class EuclideanClusterComparator : public ::pcl::Comparator<PointT>
{
- template<typename PointT, typename PointLT = pcl::Label>
- class EuclideanClusterComparator : public ::pcl::Comparator<PointT>
- {
- protected:
+ protected:
- using pcl::Comparator<PointT>::input_;
+ using pcl::Comparator<PointT>::input_;
- public:
- using typename Comparator<PointT>::PointCloud;
- using typename Comparator<PointT>::PointCloudConstPtr;
+ public:
+ using typename Comparator<PointT>::PointCloud;
+ using typename Comparator<PointT>::PointCloudConstPtr;
- using PointCloudL = pcl::PointCloud<PointLT>;
- using PointCloudLPtr = typename PointCloudL::Ptr;
- using PointCloudLConstPtr = typename PointCloudL::ConstPtr;
+ using PointCloudL = pcl::PointCloud<PointLT>;
+ using PointCloudLPtr = typename PointCloudL::Ptr;
+ using PointCloudLConstPtr = typename PointCloudL::ConstPtr;
- using Ptr = shared_ptr<EuclideanClusterComparator<PointT, PointLT> >;
- using ConstPtr = shared_ptr<const EuclideanClusterComparator<PointT, PointLT> >;
+ using Ptr = shared_ptr<EuclideanClusterComparator<PointT, PointLT> >;
+ using ConstPtr = shared_ptr<const EuclideanClusterComparator<PointT, PointLT> >;
- using ExcludeLabelSet = std::set<std::uint32_t>;
- using ExcludeLabelSetPtr = shared_ptr<ExcludeLabelSet>;
- using ExcludeLabelSetConstPtr = shared_ptr<const ExcludeLabelSet>;
+ using ExcludeLabelSet = std::set<std::uint32_t>;
+ using ExcludeLabelSetPtr = shared_ptr<ExcludeLabelSet>;
+ using ExcludeLabelSetConstPtr = shared_ptr<const ExcludeLabelSet>;
- /** \brief Default constructor for EuclideanClusterComparator. */
- EuclideanClusterComparator ()
- : distance_threshold_ (0.005f)
- , depth_dependent_ ()
- {}
+ /** \brief Default constructor for EuclideanClusterComparator. */
+ EuclideanClusterComparator() = default;
- void
- setInputCloud (const PointCloudConstPtr& cloud) override
- {
- input_ = cloud;
- Eigen::Matrix3f rot = input_->sensor_orientation_.toRotationMatrix ();
- z_axis_ = rot.col (2);
- }
+ void
+ setInputCloud (const PointCloudConstPtr& cloud) override
+ {
+ input_ = cloud;
+ Eigen::Matrix3f rot = input_->sensor_orientation_.toRotationMatrix ();
+ z_axis_ = rot.col (2);
+ }
- /** \brief Set the tolerance in meters for difference in perpendicular distance (d component of plane equation) to the plane between neighboring points, to be considered part of the same plane.
- * \param[in] distance_threshold the tolerance in meters
- * \param depth_dependent
- */
- inline void
- setDistanceThreshold (float distance_threshold, bool depth_dependent)
- {
- distance_threshold_ = distance_threshold;
- depth_dependent_ = depth_dependent;
- }
+ /** \brief Set the tolerance in meters for difference in perpendicular distance (d component of plane equation) to the plane between neighboring points, to be considered part of the same plane.
+ * \param[in] distance_threshold the tolerance in meters
+ * \param depth_dependent
+ */
+ inline void
+ setDistanceThreshold (float distance_threshold, bool depth_dependent)
+ {
+ distance_threshold_ = distance_threshold;
+ depth_dependent_ = depth_dependent;
+ }
- /** \brief Get the distance threshold in meters (d component of plane equation) between neighboring points, to be considered part of the same plane. */
- inline float
- getDistanceThreshold () const
- {
- return (distance_threshold_);
- }
+ /** \brief Get the distance threshold in meters (d component of plane equation) between neighboring points, to be considered part of the same plane. */
+ inline float
+ getDistanceThreshold () const
+ {
+ return distance_threshold_;
+ }
- /** \brief Set label cloud
- * \param[in] labels The label cloud
- */
- void
- setLabels (const PointCloudLPtr& labels)
- {
- labels_ = labels;
- }
+ /** \brief Get whether the distance threshold is depth dependent */
+ inline bool
+ getDepthDependent() const
+ {
+ return depth_dependent_;
+ }
- const ExcludeLabelSetConstPtr&
- getExcludeLabels () const
- {
- return exclude_labels_;
- }
+ /** \brief Set label cloud
+ * \param[in] labels The label cloud
+ */
+ void
+ setLabels (const PointCloudLPtr& labels)
+ {
+ labels_ = labels;
+ }
- /** \brief Set labels in the label cloud to exclude.
- * \param exclude_labels a vector of bools corresponding to whether or not a given label should be considered
- */
- void
- setExcludeLabels (const ExcludeLabelSetConstPtr &exclude_labels)
- {
- exclude_labels_ = exclude_labels;
- }
+ /** \brief Get labels */
+ const PointCloudLPtr&
+ getLabels() const
+ {
+ return labels_;
+ }
- /** \brief Compare points at two indices by their euclidean distance
- * \param idx1 The first index for the comparison
- * \param idx2 The second index for the comparison
- */
- bool
- compare (int idx1, int idx2) const override
- {
- if (labels_ && exclude_labels_)
- {
- assert (labels_->size () == input_->size ());
- const std::uint32_t &label1 = (*labels_)[idx1].label;
- const std::uint32_t &label2 = (*labels_)[idx2].label;
+ /** \brief Get exclude labels */
+ const ExcludeLabelSetConstPtr&
+ getExcludeLabels () const
+ {
+ return exclude_labels_;
+ }
- const std::set<std::uint32_t>::const_iterator it1 = exclude_labels_->find (label1);
- if (it1 == exclude_labels_->end ())
- return false;
+ /** \brief Set labels in the label cloud to exclude.
+ * \param exclude_labels a vector of bools corresponding to whether or not a given label should be considered
+ */
+ void
+ setExcludeLabels (const ExcludeLabelSetConstPtr &exclude_labels)
+ {
+ exclude_labels_ = exclude_labels;
+ }
- const std::set<std::uint32_t>::const_iterator it2 = exclude_labels_->find (label2);
- if (it2 == exclude_labels_->end ())
- return false;
- }
+ /** \brief Compare points at two indices by their euclidean distance
+ * \param idx1 The first index for the comparison
+ * \param idx2 The second index for the comparison
+ */
+ bool
+ compare (int idx1, int idx2) const override
+ {
+ if (labels_ && exclude_labels_)
+ {
+ assert (labels_->size () == input_->size ());
+ const std::uint32_t &label1 = (*labels_)[idx1].label;
+ const std::uint32_t &label2 = (*labels_)[idx2].label;
- float dist_threshold = distance_threshold_;
- if (depth_dependent_)
- {
- Eigen::Vector3f vec = (*input_)[idx1].getVector3fMap ();
- float z = vec.dot (z_axis_);
- dist_threshold *= z * z;
- }
+ const std::set<std::uint32_t>::const_iterator it1 = exclude_labels_->find (label1);
+ if (it1 == exclude_labels_->end ())
+ return false;
- const float dist = ((*input_)[idx1].getVector3fMap ()
- - (*input_)[idx2].getVector3fMap ()).norm ();
- return (dist < dist_threshold);
+ const std::set<std::uint32_t>::const_iterator it2 = exclude_labels_->find (label2);
+ if (it2 == exclude_labels_->end ())
+ return false;
}
- protected:
-
-
- /** \brief Set of labels with similar size as the input point cloud,
- * aggregating points into groups based on a similar label identifier.
- *
- * It needs to be set in conjunction with the \ref exclude_labels_
- * member in order to provided a masking functionality.
- */
- PointCloudLPtr labels_;
-
- /** \brief Specifies which labels should be excluded com being clustered.
- *
- * If a label is not specified, it's assumed by default that it's
- * intended be excluded
- */
- ExcludeLabelSetConstPtr exclude_labels_;
-
- float distance_threshold_;
-
- bool depth_dependent_;
-
- Eigen::Vector3f z_axis_;
- };
- } // namespace experimental
+ float dist_threshold = distance_threshold_;
+ if (depth_dependent_)
+ {
+ Eigen::Vector3f vec = (*input_)[idx1].getVector3fMap ();
+ float z = vec.dot (z_axis_);
+ dist_threshold *= z * z;
+ }
+ const float dist = ((*input_)[idx1].getVector3fMap ()
+ - (*input_)[idx2].getVector3fMap ()).norm ();
+ return (dist < dist_threshold);
+ }
- /** \brief EuclideanClusterComparator is a comparator used for finding clusters based on euclidian distance.
- *
- * \author Alex Trevor
- */
- template<typename PointT, typename PointNT, typename PointLT = deprecated::T>
- class EuclideanClusterComparator : public experimental::EuclideanClusterComparator<PointT, PointLT>
- {
protected:
- using experimental::EuclideanClusterComparator<PointT, PointLT>::exclude_labels_;
-
- public:
-
- using PointCloudN = pcl::PointCloud<PointNT>;
- using PointCloudNPtr = typename PointCloudN::Ptr;
- using PointCloudNConstPtr = typename PointCloudN::ConstPtr;
-
- using Ptr = shared_ptr<EuclideanClusterComparator<PointT, PointNT, PointLT> >;
- using ConstPtr = shared_ptr<const EuclideanClusterComparator<PointT, PointNT, PointLT> >;
-
- using experimental::EuclideanClusterComparator<PointT, PointLT>::setExcludeLabels;
-
- /** \brief Default constructor for EuclideanClusterComparator. */
- PCL_DEPRECATED(1, 12, "remove PointNT from template parameters")
- EuclideanClusterComparator ()
- : normals_ ()
- , angular_threshold_ (0.0f)
- {}
-
- /** \brief Provide a pointer to the input normals.
- * \param[in] normals the input normal cloud
- */
- PCL_DEPRECATED(1, 12, "EuclideadClusterComparator never actually used normals and angular threshold, this function has no effect on the behavior of the comparator. It is deprecated and will be removed in future releases.")
- inline void
- setInputNormals (const PointCloudNConstPtr& normals) { normals_ = normals; }
-
- /** \brief Get the input normals. */
- PCL_DEPRECATED(1, 12, "EuclideadClusterComparator never actually used normals and angular threshold, this function has no effect on the behavior of the comparator. It is deprecated and will be removed in future releases.")
- inline PointCloudNConstPtr
- getInputNormals () const { return (normals_); }
- /** \brief Set the tolerance in radians for difference in normal direction between neighboring points, to be considered part of the same plane.
- * \param[in] angular_threshold the tolerance in radians
+ /** \brief Set of labels with similar size as the input point cloud,
+ * aggregating points into groups based on a similar label identifier.
+ *
+ * It needs to be set in conjunction with the \ref exclude_labels_
+ * member in order to provide a masking functionality.
*/
- PCL_DEPRECATED(1, 12, "EuclideadClusterComparator never actually used normals and angular threshold, this function has no effect on the behavior of the comparator. It is deprecated and will be removed in future releases.")
- inline void
- setAngularThreshold (float angular_threshold)
- {
- angular_threshold_ = std::cos (angular_threshold);
- }
-
- /** \brief Get the angular threshold in radians for difference in normal direction between neighboring points, to be considered part of the same plane. */
- PCL_DEPRECATED(1, 12, "EuclideadClusterComparator never actually used normals and angular threshold, this function has no effect on the behavior of the comparator. It is deprecated and will be removed in future releases.")
- inline float
- getAngularThreshold () const { return (std::acos (angular_threshold_) ); }
+ PointCloudLPtr labels_;
- /** \brief Set labels in the label cloud to exclude.
- * \param[in] exclude_labels a vector of bools corresponding to whether or not a given label should be considered
+ /** \brief Specifies which labels should be excluded from being clustered.
+ *
+ * If a label is not specified, it's assumed by default that it's
+ * intended to be excluded
*/
- PCL_DEPRECATED(1, 12, "use setExcludeLabels(const ExcludeLabelSetConstPtr &) instead")
- void
- setExcludeLabels (const std::vector<bool>& exclude_labels)
- {
- exclude_labels_ = pcl::make_shared<std::set<std::uint32_t> > ();
- for (std::size_t i = 0; i < exclude_labels.size (); ++i)
- if (exclude_labels[i])
- exclude_labels_->insert (i);
- }
+ ExcludeLabelSetConstPtr exclude_labels_;
- protected:
+ float distance_threshold_ = 0.005f;
- PointCloudNConstPtr normals_;
+ bool depth_dependent_ = false;
- float angular_threshold_;
+ Eigen::Vector3f z_axis_;
};
-
- template<typename PointT, typename PointLT>
- class EuclideanClusterComparator<PointT, PointLT, deprecated::T>
- : public experimental::EuclideanClusterComparator<PointT, PointLT> {};
}
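A minimal configuration sketch for the un-deprecated comparator; the cloud and label-cloud names are assumptions, and in practice the comparator is handed to an organized segmentation class:

#include <pcl/point_types.h>
#include <pcl/segmentation/euclidean_cluster_comparator.h>

// `cloud` (pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr) and
// `labels` (pcl::PointCloud<pcl::Label>::Ptr) are assumed to exist.
using Comparator = pcl::EuclideanClusterComparator<pcl::PointXYZRGBA, pcl::Label>;
Comparator::Ptr comparator (new Comparator);
comparator->setInputCloud (cloud);
comparator->setLabels (labels);
auto exclude = pcl::make_shared<Comparator::ExcludeLabelSet> ();
exclude->insert (0);  // e.g. treat label 0 as background
comparator->setExcludeLabels (exclude);
comparator->setDistanceThreshold (0.02f, /*depth_dependent=*/ true);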
#pragma once
-#include <pcl/segmentation/boost.h>
#include <pcl/segmentation/plane_coefficient_comparator.h>
namespace pcl
#pragma once
+#include <pcl/console/print.h> // for PCL_ERROR
#include <pcl/pcl_base.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search
+#include <pcl/search/kdtree.h> // for KdTree
namespace pcl
{
*/
template <typename PointT> void
extractEuclideanClusters (
- const PointCloud<PointT> &cloud, const std::vector<int> &indices,
+ const PointCloud<PointT> &cloud, const Indices &indices,
const typename search::Search<PointT>::Ptr &tree, float tolerance, std::vector<PointIndices> &clusters,
unsigned int min_pts_per_cluster = 1, unsigned int max_pts_per_cluster = (std::numeric_limits<int>::max) ());
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Decompose a region of space into clusters based on the euclidean distance between points, and the normal
- * angular deviation
+ * angular deviation between points. Each point added to the cluster becomes the origin of another radius
+ * search. Each point within radius range is compared to that origin with respect to normal angle and
+ * Euclidean distance; if both are below their respective thresholds, the point is added to the cluster.
+ * Generally speaking, the clustering will not stop on smooth surfaces but only at surfaces with sharp edges.
* \param cloud the point cloud message
* \param normals the point cloud message containing normal information
* \param tree the spatial locator (e.g., kd-tree) used for nearest neighbors searching
static_cast<std::size_t>(normals.size()));
return;
}
+ const double cos_eps_angle = std::cos (eps_angle); // compute this once instead of acos many times (faster)
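+  // For unit-length normals, std::acos (std::abs (dot_p)) < eps_angle is equivalent to
+  // std::abs (dot_p) > cos_eps_angle (acos is monotonically decreasing on [0, 1]), so the
+  // per-neighbor test below needs only a comparison instead of an acos call.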
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
for (std::size_t i = 0; i < cloud.size (); ++i)
if (processed[i])
continue;
- std::vector<unsigned int> seed_queue;
+ Indices seed_queue;
int sq_idx = 0;
- seed_queue.push_back (static_cast<int> (i));
+ seed_queue.push_back (static_cast<index_t> (i));
processed[i] = true;
//processed[nn_indices[j]] = true;
// [-1;1]
- double dot_p = normals[i].normal[0] * normals[nn_indices[j]].normal[0] +
- normals[i].normal[1] * normals[nn_indices[j]].normal[1] +
- normals[i].normal[2] * normals[nn_indices[j]].normal[2];
- if ( std::acos (std::abs (dot_p)) < eps_angle )
+ double dot_p = normals[seed_queue[sq_idx]].normal[0] * normals[nn_indices[j]].normal[0] +
+ normals[seed_queue[sq_idx]].normal[1] * normals[nn_indices[j]].normal[1] +
+ normals[seed_queue[sq_idx]].normal[2] * normals[nn_indices[j]].normal[2];
+ if ( std::abs (dot_p) > cos_eps_angle )
{
processed[nn_indices[j]] = true;
seed_queue.push_back (nn_indices[j]);
r.header = cloud.header;
clusters.push_back (r); // We could avoid a copy by working directly in the vector
}
+ else
+ {
+ PCL_DEBUG("[pcl::extractEuclideanClusters] This cluster has %zu points, which is not between %u and %u points, so it is not a final cluster\n",
+ seed_queue.size (), min_pts_per_cluster, max_pts_per_cluster);
+ }
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Decompose a region of space into clusters based on the euclidean distance between points, and the normal
- * angular deviation
+ * angular deviation between points. Each point added to the cluster becomes the origin of another radius
+ * search. Each point within radius range is compared to that origin with respect to normal angle and
+ * Euclidean distance; if both are below their respective thresholds, the point is added to the cluster.
+ * Generally speaking, the clustering will not stop on smooth surfaces but only at surfaces with sharp edges.
* \param cloud the point cloud message
* \param normals the point cloud message containing normal information
* \param indices a list of point indices to use from \a cloud
* \note the tree has to be created as a spatial locator on \a cloud
* \param tolerance the spatial cluster tolerance as a measure in the L2 Euclidean space
* \param clusters the resultant clusters containing point indices (as PointIndices)
- * \param eps_angle the maximum allowed difference between normals in degrees for cluster/region growing
+ * \param eps_angle the maximum allowed difference between normals in radians for cluster/region growing
* \param min_pts_per_cluster minimum number of points that a cluster may contain (default: 1)
* \param max_pts_per_cluster maximum number of points that a cluster may contain (default: max int)
* \ingroup segmentation
template <typename PointT, typename Normal>
void extractEuclideanClusters (
const PointCloud<PointT> &cloud, const PointCloud<Normal> &normals,
- const std::vector<int> &indices, const typename KdTree<PointT>::Ptr &tree,
+ const Indices &indices, const typename KdTree<PointT>::Ptr &tree,
float tolerance, std::vector<PointIndices> &clusters, double eps_angle,
unsigned int min_pts_per_cluster = 1,
unsigned int max_pts_per_cluster = (std::numeric_limits<int>::max) ())
static_cast<std::size_t>(normals.size()));
return;
}
+ const double cos_eps_angle = std::cos (eps_angle); // compute this once instead of acos many times (faster)
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (std::size_t i = 0; i < indices.size (); ++i)
+ for (const auto& point_idx : indices)
{
- if (processed[indices[i]])
+ if (processed[point_idx])
continue;
- std::vector<int> seed_queue;
+ Indices seed_queue;
int sq_idx = 0;
- seed_queue.push_back (indices[i]);
+ seed_queue.push_back (point_idx);
- processed[indices[i]] = true;
+ processed[point_idx] = true;
while (sq_idx < static_cast<int> (seed_queue.size ()))
{
//processed[nn_indices[j]] = true;
// [-1;1]
- double dot_p =
- normals[indices[i]].normal[0] * normals[indices[nn_indices[j]]].normal[0] +
- normals[indices[i]].normal[1] * normals[indices[nn_indices[j]]].normal[1] +
- normals[indices[i]].normal[2] * normals[indices[nn_indices[j]]].normal[2];
- if ( std::acos (std::abs (dot_p)) < eps_angle )
+ double dot_p = normals[seed_queue[sq_idx]].normal[0] * normals[nn_indices[j]].normal[0] +
+ normals[seed_queue[sq_idx]].normal[1] * normals[nn_indices[j]].normal[1] +
+ normals[seed_queue[sq_idx]].normal[2] * normals[nn_indices[j]].normal[2];
+ if ( std::abs (dot_p) > cos_eps_angle )
{
processed[nn_indices[j]] = true;
seed_queue.push_back (nn_indices[j]);
r.header = cloud.header;
clusters.push_back (r);
}
+ else
+ {
+ PCL_DEBUG("[pcl::extractEuclideanClusters] This cluster has %zu points, which is not between %u and %u points, so it is not a final cluster\n",
+ seed_queue.size (), min_pts_per_cluster, max_pts_per_cluster);
+ }
}
}
EuclideanClusterExtraction () : tree_ (),
cluster_tolerance_ (0),
min_pts_per_cluster_ (1),
- max_pts_per_cluster_ (std::numeric_limits<int>::max ())
+ max_pts_per_cluster_ (std::numeric_limits<pcl::uindex_t>::max ())
{};
/** \brief Provide a pointer to the search object.
* \param[in] min_cluster_size the minimum cluster size
*/
inline void
- setMinClusterSize (int min_cluster_size)
+ setMinClusterSize (pcl::uindex_t min_cluster_size)
{
min_pts_per_cluster_ = min_cluster_size;
}
/** \brief Get the minimum number of points that a cluster needs to contain in order to be considered valid. */
- inline int
+ inline pcl::uindex_t
getMinClusterSize () const
{
return (min_pts_per_cluster_);
* \param[in] max_cluster_size the maximum cluster size
*/
inline void
- setMaxClusterSize (int max_cluster_size)
+ setMaxClusterSize (pcl::uindex_t max_cluster_size)
{
max_pts_per_cluster_ = max_cluster_size;
}
/** \brief Get the maximum number of points that a cluster needs to contain in order to be considered valid. */
- inline int
+ inline pcl::uindex_t
getMaxClusterSize () const
{
return (max_pts_per_cluster_);
double cluster_tolerance_;
/** \brief The minimum number of points that a cluster needs to contain in order to be considered valid (default = 1). */
- int min_pts_per_cluster_;
+ pcl::uindex_t min_pts_per_cluster_;
/** \brief The maximum number of points that a cluster needs to contain in order to be considered valid (default = MAXINT). */
- int max_pts_per_cluster_;
+ pcl::uindex_t max_pts_per_cluster_;
/** \brief Class getName method. */
virtual std::string getClassName () const { return ("EuclideanClusterExtraction"); }
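For reference, the typical use of this class is unchanged by the switch of the cluster-size setters to pcl::uindex_t (the integer literals below convert implicitly). A minimal sketch, assuming a filled pcl::PointCloud<pcl::PointXYZ>::Ptr named cloud:

#include <vector>
#include <pcl/point_types.h>
#include <pcl/search/kdtree.h>
#include <pcl/segmentation/extract_clusters.h>

// `cloud` is assumed to exist: pcl::PointCloud<pcl::PointXYZ>::Ptr
pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
tree->setInputCloud (cloud);

pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
ec.setClusterTolerance (0.02);   // 2 cm
ec.setMinClusterSize (100);
ec.setMaxClusterSize (25000);
ec.setSearchMethod (tree);
ec.setInputCloud (cloud);
std::vector<pcl::PointIndices> cluster_indices;
ec.extract (cluster_indices);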
#pragma once
+#include <pcl/search/search.h>
#include <pcl/pcl_base.h>
-#include <pcl/search/pcl_search.h>
-namespace pcl
-{
+namespace pcl {
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/** \brief Decompose a region of space into clusters based on the Euclidean distance
+ * between points
+ * \param[in] cloud the point cloud message
+ * \param[in] tree the spatial locator (e.g., kd-tree) used for nearest neighbors
+ * searching
+ * \note the tree has to be created as a spatial locator on \a cloud
+ * \param[in] tolerance the spatial cluster tolerance as a measure in L2 Euclidean space
+ * \param[out] labeled_clusters the resultant clusters containing point indices (as a
+ * vector of PointIndices)
+ * \param[in] min_pts_per_cluster minimum number of points that a cluster may contain
+ * (default: 1)
+ * \param[in] max_pts_per_cluster maximum number of points that a cluster may contain
+ * (default: max int)
+ * \param[in] max_label maximum label value (ignored; this overload is deprecated)
+ * \ingroup segmentation
+ */
+template <typename PointT>
+PCL_DEPRECATED(1, 14, "Use of max_label is deprecated")
+void extractLabeledEuclideanClusters(
+ const PointCloud<PointT>& cloud,
+ const typename search::Search<PointT>::Ptr& tree,
+ float tolerance,
+ std::vector<std::vector<PointIndices>>& labeled_clusters,
+ unsigned int min_pts_per_cluster,
+ unsigned int max_pts_per_cluster,
+ unsigned int max_label);
+
+/** \brief Decompose a region of space into clusters based on the Euclidean distance
+ * between points
+ * \param[in] cloud the point cloud message
+ * \param[in] tree the spatial locator (e.g., kd-tree) used for nearest neighbors
+ * searching
+ * \note the tree has to be created as a spatial locator on \a cloud
+ * \param[in] tolerance the spatial cluster tolerance as a measure in L2 Euclidean space
+ * \param[out] labeled_clusters the resultant clusters containing point indices
+ * (as a vector of PointIndices)
+ * \param[in] min_pts_per_cluster minimum number of points that a cluster may contain
+ * (default: 1)
+ * \param[in] max_pts_per_cluster maximum number of points that a cluster may contain
+ * (default: max int)
+ * \ingroup segmentation
+ */
+template <typename PointT>
+void
+extractLabeledEuclideanClusters(
+ const PointCloud<PointT>& cloud,
+ const typename search::Search<PointT>::Ptr& tree,
+ float tolerance,
+ std::vector<std::vector<PointIndices>>& labeled_clusters,
+ unsigned int min_pts_per_cluster = 1,
+ unsigned int max_pts_per_cluster = std::numeric_limits<unsigned int>::max());
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/** \brief @b LabeledEuclideanClusterExtraction represents a segmentation class for
+ * cluster extraction in an Euclidean sense, with label info.
+ * \author Koen Buys
+ * \ingroup segmentation
+ */
+template <typename PointT>
+class LabeledEuclideanClusterExtraction : public PCLBase<PointT> {
+ using BasePCLBase = PCLBase<PointT>;
+
+public:
+ using PointCloud = pcl::PointCloud<PointT>;
+ using PointCloudPtr = typename PointCloud::Ptr;
+ using PointCloudConstPtr = typename PointCloud::ConstPtr;
+
+ using KdTree = pcl::search::Search<PointT>;
+ using KdTreePtr = typename KdTree::Ptr;
+
+ using PointIndicesPtr = PointIndices::Ptr;
+ using PointIndicesConstPtr = PointIndices::ConstPtr;
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- /** \brief Decompose a region of space into clusters based on the Euclidean distance between points
- * \param[in] cloud the point cloud message
- * \param[in] tree the spatial locator (e.g., kd-tree) used for nearest neighbors searching
- * \note the tree has to be created as a spatial locator on \a cloud
- * \param[in] tolerance the spatial cluster tolerance as a measure in L2 Euclidean space
- * \param[out] labeled_clusters the resultant clusters containing point indices (as a vector of PointIndices)
- * \param[in] min_pts_per_cluster minimum number of points that a cluster may contain (default: 1)
- * \param[in] max_pts_per_cluster maximum number of points that a cluster may contain (default: max int)
- * \param[in] max_label
- * \ingroup segmentation
- */
- template <typename PointT> void
- extractLabeledEuclideanClusters (
- const PointCloud<PointT> &cloud, const typename search::Search<PointT>::Ptr &tree,
- float tolerance, std::vector<std::vector<PointIndices> > &labeled_clusters,
- unsigned int min_pts_per_cluster = 1, unsigned int max_pts_per_cluster = std::numeric_limits<unsigned int>::max (),
- unsigned int max_label = std::numeric_limits<unsigned int>::max ());
-
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- /** \brief @b LabeledEuclideanClusterExtraction represents a segmentation class for cluster extraction in an Euclidean sense, with label info.
- * \author Koen Buys
- * \ingroup segmentation
- */
- template <typename PointT>
- class LabeledEuclideanClusterExtraction: public PCLBase<PointT>
+ /** \brief Empty constructor. */
+ LabeledEuclideanClusterExtraction()
+ : tree_()
+ , cluster_tolerance_(0)
+ , min_pts_per_cluster_(1)
+ , max_pts_per_cluster_(std::numeric_limits<int>::max())
+ , max_label_(std::numeric_limits<int>::max()){};
+
+ /** \brief Provide a pointer to the search object.
+ * \param[in] tree a pointer to the spatial search object.
+ */
+ inline void
+ setSearchMethod(const KdTreePtr& tree)
+ {
+ tree_ = tree;
+ }
+
+ /** \brief Get a pointer to the search method used. */
+ inline KdTreePtr
+ getSearchMethod() const
+ {
+ return (tree_);
+ }
+
+ /** \brief Set the spatial cluster tolerance as a measure in the L2 Euclidean space
+ * \param[in] tolerance the spatial cluster tolerance as a measure in the L2 Euclidean
+ * space
+ */
+ inline void
+ setClusterTolerance(double tolerance)
+ {
+ cluster_tolerance_ = tolerance;
+ }
+
+ /** \brief Get the spatial cluster tolerance as a measure in the L2 Euclidean space.
+ */
+ inline double
+ getClusterTolerance() const
+ {
+ return (cluster_tolerance_);
+ }
+
+ /** \brief Set the minimum number of points that a cluster needs to contain in order
+ * to be considered valid.
+ * \param[in] min_cluster_size the minimum cluster size
+ */
+ inline void
+ setMinClusterSize(int min_cluster_size)
+ {
+ min_pts_per_cluster_ = min_cluster_size;
+ }
+
+ /** \brief Get the minimum number of points that a cluster needs to contain in order
+ * to be considered valid. */
+ inline int
+ getMinClusterSize() const
{
- using BasePCLBase = PCLBase<PointT>;
-
- public:
- using PointCloud = pcl::PointCloud<PointT>;
- using PointCloudPtr = typename PointCloud::Ptr;
- using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
- using KdTree = pcl::search::Search<PointT>;
- using KdTreePtr = typename KdTree::Ptr;
-
- using PointIndicesPtr = PointIndices::Ptr;
- using PointIndicesConstPtr = PointIndices::ConstPtr;
-
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- /** \brief Empty constructor. */
- LabeledEuclideanClusterExtraction () :
- tree_ (),
- cluster_tolerance_ (0),
- min_pts_per_cluster_ (1),
- max_pts_per_cluster_ (std::numeric_limits<int>::max ()),
- max_label_ (std::numeric_limits<int>::max ())
- {};
-
- /** \brief Provide a pointer to the search object.
- * \param[in] tree a pointer to the spatial search object.
- */
- inline void
- setSearchMethod (const KdTreePtr &tree) { tree_ = tree; }
-
- /** \brief Get a pointer to the search method used. */
- inline KdTreePtr
- getSearchMethod () const { return (tree_); }
-
- /** \brief Set the spatial cluster tolerance as a measure in the L2 Euclidean space
- * \param[in] tolerance the spatial cluster tolerance as a measure in the L2 Euclidean space
- */
- inline void
- setClusterTolerance (double tolerance) { cluster_tolerance_ = tolerance; }
-
- /** \brief Get the spatial cluster tolerance as a measure in the L2 Euclidean space. */
- inline double
- getClusterTolerance () const { return (cluster_tolerance_); }
-
- /** \brief Set the minimum number of points that a cluster needs to contain in order to be considered valid.
- * \param[in] min_cluster_size the minimum cluster size
- */
- inline void
- setMinClusterSize (int min_cluster_size) { min_pts_per_cluster_ = min_cluster_size; }
-
- /** \brief Get the minimum number of points that a cluster needs to contain in order to be considered valid. */
- inline int
- getMinClusterSize () const { return (min_pts_per_cluster_); }
-
- /** \brief Set the maximum number of points that a cluster needs to contain in order to be considered valid.
- * \param[in] max_cluster_size the maximum cluster size
- */
- inline void
- setMaxClusterSize (int max_cluster_size) { max_pts_per_cluster_ = max_cluster_size; }
-
- /** \brief Get the maximum number of points that a cluster needs to contain in order to be considered valid. */
- inline int
- getMaxClusterSize () const { return (max_pts_per_cluster_); }
-
- /** \brief Set the maximum number of labels in the cloud.
- * \param[in] max_label the maximum
- */
- inline void
- setMaxLabels (unsigned int max_label) { max_label_ = max_label; }
-
- /** \brief Get the maximum number of labels */
- inline unsigned int
- getMaxLabels () const { return (max_label_); }
-
- /** \brief Cluster extraction in a PointCloud given by <setInputCloud (), setIndices ()>
- * \param[out] labeled_clusters the resultant point clusters
- */
- void
- extract (std::vector<std::vector<PointIndices> > &labeled_clusters);
-
- protected:
- // Members derived from the base class
- using BasePCLBase::input_;
- using BasePCLBase::indices_;
- using BasePCLBase::initCompute;
- using BasePCLBase::deinitCompute;
-
- /** \brief A pointer to the spatial search object. */
- KdTreePtr tree_;
-
- /** \brief The spatial cluster tolerance as a measure in the L2 Euclidean space. */
- double cluster_tolerance_;
-
- /** \brief The minimum number of points that a cluster needs to contain in order to be considered valid (default = 1). */
- int min_pts_per_cluster_;
-
- /** \brief The maximum number of points that a cluster needs to contain in order to be considered valid (default = MAXINT). */
- int max_pts_per_cluster_;
-
- /** \brief The maximum number of labels we can find in this pointcloud (default = MAXINT)*/
- unsigned int max_label_;
-
- /** \brief Class getName method. */
- virtual std::string getClassName () const { return ("LabeledEuclideanClusterExtraction"); }
-
- };
-
- /** \brief Sort clusters method (for std::sort).
- * \ingroup segmentation
- */
- inline bool
- compareLabeledPointClusters (const pcl::PointIndices &a, const pcl::PointIndices &b)
+ return (min_pts_per_cluster_);
+ }
+
+ /** \brief Set the maximum number of points that a cluster needs to contain in order
+ * to be considered valid.
+ * \param[in] max_cluster_size the maximum cluster size
+ */
+ inline void
+ setMaxClusterSize(int max_cluster_size)
{
- return (a.indices.size () < b.indices.size ());
+ max_pts_per_cluster_ = max_cluster_size;
}
+
+ /** \brief Get the maximum number of points that a cluster needs to contain in order
+ * to be considered valid. */
+ inline int
+ getMaxClusterSize() const
+ {
+ return (max_pts_per_cluster_);
+ }
+
+ /** \brief Set the maximum number of labels in the cloud.
+ * \param[in] max_label the maximum
+ */
+ PCL_DEPRECATED(1, 14, "Max label is being deprecated")
+ inline void
+ setMaxLabels(unsigned int max_label)
+ {
+ max_label_ = max_label;
+ }
+
+ /** \brief Get the maximum number of labels */
+ PCL_DEPRECATED(1, 14, "Max label is being deprecated")
+ inline unsigned int
+ getMaxLabels() const
+ {
+ return (max_label_);
+ }
+
+ /** \brief Cluster extraction in a PointCloud given by <setInputCloud (), setIndices ()>
+ * \param[out] labeled_clusters the resultant point clusters
+ */
+ void
+ extract(std::vector<std::vector<PointIndices>>& labeled_clusters);
+
+protected:
+ // Members derived from the base class
+ using BasePCLBase::deinitCompute;
+ using BasePCLBase::indices_;
+ using BasePCLBase::initCompute;
+ using BasePCLBase::input_;
+
+ /** \brief A pointer to the spatial search object. */
+ KdTreePtr tree_;
+
+ /** \brief The spatial cluster tolerance as a measure in the L2 Euclidean space. */
+ double cluster_tolerance_;
+
+ /** \brief The minimum number of points that a cluster needs to contain in order to be
+ * considered valid (default = 1). */
+ int min_pts_per_cluster_;
+
+ /** \brief The maximum number of points that a cluster needs to contain in order to be
+ * considered valid (default = MAXINT). */
+ int max_pts_per_cluster_;
+
+ /** \brief The maximum number of labels we can find in this pointcloud (default =
+ * MAXINT)*/
+ unsigned int max_label_;
+
+ /** \brief Class getName method. */
+ virtual std::string
+ getClassName() const
+ {
+ return ("LabeledEuclideanClusterExtraction");
+ }
+};
+
+/** \brief Sort clusters method (for std::sort).
+ * \ingroup segmentation
+ */
+inline bool
+compareLabeledPointClusters(const pcl::PointIndices& a, const pcl::PointIndices& b)
+{
+ return (a.indices.size() < b.indices.size());
}
+} // namespace pcl
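A minimal usage sketch for the reflowed class, assuming a filled pcl::PointCloud<pcl::PointXYZL>::Ptr named labeled_cloud:

#include <vector>
#include <pcl/point_types.h>
#include <pcl/segmentation/extract_labeled_clusters.h>

// `labeled_cloud` is assumed to exist: pcl::PointCloud<pcl::PointXYZL>::Ptr
pcl::LabeledEuclideanClusterExtraction<pcl::PointXYZL> lec;
lec.setInputCloud (labeled_cloud);
lec.setClusterTolerance (0.02);  // 2 cm
lec.setMinClusterSize (50);
lec.setMaxClusterSize (25000);
std::vector<std::vector<pcl::PointIndices>> labeled_clusters;
lec.extract (labeled_clusters);  // clusters grouped per label value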
#ifdef PCL_NO_PRECOMPILE
#include <pcl/segmentation/impl/extract_labeled_clusters.hpp>
#pragma once
+#include <cfloat> // for FLT_MAX
+
#include <pcl/pcl_base.h>
-#include <pcl/sample_consensus/sac_model_plane.h>
namespace pcl
{
/** Build the initial GMMs using the Orchard and Bouman color clustering algorithm */
PCL_EXPORTS void
buildGMMs (const Image &image,
- const std::vector<int>& indices,
+ const Indices& indices,
const std::vector<SegmentationValue> &hardSegmentation,
std::vector<std::size_t> &components,
GMM &background_GMM, GMM &foreground_GMM);
/** Iteratively learn GMMs using GrabCut updating algorithm */
PCL_EXPORTS void
learnGMMs (const Image& image,
- const std::vector<int>& indices,
+ const Indices& indices,
const std::vector<SegmentationValue>& hard_segmentation,
std::vector<std::size_t>& components,
GMM& background_GMM, GMM& foreground_GMM);
NLinks () : nb_links (0), indices (0), dists (0), weights (0) {}
int nb_links;
- std::vector<int> indices;
+ Indices indices;
std::vector<float> dists;
std::vector<float> weights;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::ApproximateProgressiveMorphologicalFilter<PointT>::extract (std::vector<int>& ground)
+pcl::ApproximateProgressiveMorphologicalFilter<PointT>::extract (Indices& ground)
{
bool segmentation_is_possible = initCompute ();
if (!segmentation_is_possible)
// Find indices of the points whose difference between the source and
// filtered point clouds is less than the current height threshold.
- std::vector<int> pt_indices;
+ Indices pt_indices;
for (std::size_t p_idx = 0; p_idx < ground.size (); ++p_idx)
{
PointT p = (*cloud)[p_idx];
#define PCL_SEGMENTATION_IMPL_CONDITIONAL_EUCLIDEAN_CLUSTERING_HPP_
#include <pcl/segmentation/conditional_euclidean_clustering.h>
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
template<typename PointT> void
pcl::ConditionalEuclideanClustering<PointT>::segment (pcl::IndicesClusters &clusters)
searcher_->setInputCloud (input_, indices_);
// Temp variables used by search class
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (input_->size (), false);
// Process all points indexed by indices_
- for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii) // iii = input indices iterator
+ for (const auto& iindex : (*indices_)) // iindex = input index
{
// Has this point been processed before?
- if ((*indices_)[iii] == -1 || processed[(*indices_)[iii]])
+ if (iindex == UNAVAILABLE || processed[iindex])
continue;
// Set up a new growing cluster
- std::vector<int> current_cluster;
+ Indices current_cluster;
int cii = 0; // cii = cluster indices iterator
// Add the point to the cluster
- current_cluster.push_back ((*indices_)[iii]);
- processed[(*indices_)[iii]] = true;
+ current_cluster.push_back (iindex);
+ processed[iindex] = true;
// Process the current cluster (it can be growing in size as it is being processed)
while (cii < static_cast<int> (current_cluster.size ()))
for (int nii = 1; nii < static_cast<int> (nn_indices.size ()); ++nii) // nii = neighbor indices iterator
{
// Has this point been processed before?
- if (nn_indices[nii] == -1 || processed[nn_indices[nii]])
+ if (nn_indices[nii] == UNAVAILABLE || processed[nn_indices[nii]])
continue;
// Validate if condition holds
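For orientation, a minimal sketch of how this class is driven from user code; the condition function and the cloud name below are assumptions:

#include <cmath>
#include <pcl/point_types.h>
#include <pcl/segmentation/conditional_euclidean_clustering.h>

// A user-supplied growing condition: the candidate joins the seed's cluster if this returns true.
bool
enforceSimilarZ (const pcl::PointXYZ& seed, const pcl::PointXYZ& candidate, float /*squared_distance*/)
{
  return std::abs (seed.z - candidate.z) < 0.05f;
}

// `cloud` is assumed to exist: pcl::PointCloud<pcl::PointXYZ>::Ptr
pcl::ConditionalEuclideanClustering<pcl::PointXYZ> cec;
cec.setInputCloud (cloud);
cec.setConditionFunction (&enforceSimilarZ);
cec.setClusterTolerance (0.05);
cec.setMinClusterSize (50);
cec.setMaxClusterSize (100000);
pcl::IndicesClusters clusters;
cec.segment (clusters);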
#ifndef PCL_SEGMENTATION_IMPL_CPC_SEGMENTATION_HPP_
#define PCL_SEGMENTATION_IMPL_CPC_SEGMENTATION_HPP_
+#include <pcl/sample_consensus/sac_model_plane.h> // for SampleConsensusModelPlane
#include <pcl/segmentation/cpc_segmentation.h>
template <typename PointT>
int cluster_concave_pts = 0;
float cluster_score = 0;
// std::cout << "Cluster has " << cluster_indices[cc].indices.size () << " points" << std::endl;
- for (const int ¤t_index : cluster_index.indices)
+ for (const auto ¤t_index : cluster_index.indices)
{
double index_score = weights[current_index];
if (use_directed_weights_)
}
int number_connections_cut = 0;
- for (const int &point_index : cut_support_indices)
+ for (const auto &point_index : cut_support_indices)
{
if (use_clean_cutting_)
{
iterations_ = 0;
best_score_ = -std::numeric_limits<double>::max ();
- std::vector<int> selection;
+ pcl::Indices selection;
Eigen::VectorXf model_coefficients;
unsigned skipped_count = 0;
sac_model_->selectWithinDistance (model_coefficients, threshold_, *current_inliers);
double current_score = 0;
Eigen::Vector3f plane_normal (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
- for (const int ¤t_index : *current_inliers)
+ for (const auto ¤t_index : *current_inliers)
{
double index_score = weights_[current_index];
if (use_directed_weights_)
#ifndef PCL_CRF_SEGMENTATION_HPP_
#define PCL_CRF_SEGMENTATION_HPP_
+#include <pcl/filters/voxel_grid_label.h> // for VoxelGridLabel
#include <pcl/segmentation/crf_segmentation.h>
#include <pcl/common/io.h>
-#include <cstdio>
#include <cstdlib>
#include <ctime>
#define PCL_SEGMENTATION_IMPL_EXTRACT_CLUSTERS_H_
#include <pcl/segmentation/extract_clusters.h>
+#include <pcl/search/organized.h> // for OrganizedNeighbor
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
for (int i = 0; i < static_cast<int> (cloud.size ()); ++i)
if (processed[i])
continue;
- std::vector<int> seed_queue;
+ Indices seed_queue;
int sq_idx = 0;
seed_queue.push_back (i);
for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j) // can't assume sorted (default isn't!)
{
- if (nn_indices[j] == -1 || processed[nn_indices[j]]) // Has this point been processed before ?
+ if (nn_indices[j] == UNAVAILABLE || processed[nn_indices[j]]) // Has this point been processed before ?
continue;
// Perform a simple Euclidean clustering
r.header = cloud.header;
clusters.push_back (r); // We could avoid a copy by working directly in the vector
}
+ else
+ {
+ PCL_DEBUG("[pcl::extractEuclideanClusters] This cluster has %zu points, which is not between %u and %u points, so it is not a final cluster\n",
+ seed_queue.size (), min_pts_per_cluster, max_pts_per_cluster);
+ }
}
}
/** @todo: fix the return value, make sure the exit is not needed anymore*/
template <typename PointT> void
pcl::extractEuclideanClusters (const PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const typename search::Search<PointT>::Ptr &tree,
float tolerance, std::vector<PointIndices> &clusters,
unsigned int min_pts_per_cluster,
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (const int &index : indices)
+ for (const auto &index : indices)
{
if (processed[index])
continue;
- std::vector<int> seed_queue;
+ Indices seed_queue;
int sq_idx = 0;
seed_queue.push_back (index);
for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j) // can't assume sorted (default isn't!)
{
- if (nn_indices[j] == -1 || processed[nn_indices[j]]) // Has this point been processed before ?
+ if (nn_indices[j] == UNAVAILABLE || processed[nn_indices[j]]) // Has this point been processed before ?
continue;
// Perform a simple Euclidean clustering
r.header = cloud.header;
clusters.push_back (r); // We could avoid a copy by working directly in the vector
}
+ else
+ {
+ PCL_DEBUG("[pcl::extractEuclideanClusters] This cluster has %zu points, which is not between %u and %u points, so it is not a final cluster\n",
+ seed_queue.size (), min_pts_per_cluster, max_pts_per_cluster);
+ }
}
}
#define PCL_INSTANTIATE_EuclideanClusterExtraction(T) template class PCL_EXPORTS pcl::EuclideanClusterExtraction<T>;
#define PCL_INSTANTIATE_extractEuclideanClusters(T) template void PCL_EXPORTS pcl::extractEuclideanClusters<T>(const pcl::PointCloud<T> &, const typename pcl::search::Search<T>::Ptr &, float , std::vector<pcl::PointIndices> &, unsigned int, unsigned int);
-#define PCL_INSTANTIATE_extractEuclideanClusters_indices(T) template void PCL_EXPORTS pcl::extractEuclideanClusters<T>(const pcl::PointCloud<T> &, const std::vector<int> &, const typename pcl::search::Search<T>::Ptr &, float , std::vector<pcl::PointIndices> &, unsigned int, unsigned int);
+#define PCL_INSTANTIATE_extractEuclideanClusters_indices(T) template void PCL_EXPORTS pcl::extractEuclideanClusters<T>(const pcl::PointCloud<T> &, const pcl::Indices &, const typename pcl::search::Search<T>::Ptr &, float , std::vector<pcl::PointIndices> &, unsigned int, unsigned int);
#endif // PCL_EXTRACT_CLUSTERS_IMPL_H_
#include <pcl/segmentation/extract_labeled_clusters.h>
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> void
-pcl::extractLabeledEuclideanClusters (const PointCloud<PointT> &cloud,
- const typename search::Search<PointT>::Ptr &tree,
- float tolerance,
- std::vector<std::vector<PointIndices> > &labeled_clusters,
- unsigned int min_pts_per_cluster,
- unsigned int max_pts_per_cluster,
- unsigned int)
+template <typename PointT>
+void
+pcl::extractLabeledEuclideanClusters(
+ const PointCloud<PointT>& cloud,
+ const typename search::Search<PointT>::Ptr& tree,
+ float tolerance,
+ std::vector<std::vector<PointIndices>>& labeled_clusters,
+ unsigned int min_pts_per_cluster,
+ unsigned int max_pts_per_cluster,
+ unsigned int)
{
- if (tree->getInputCloud ()->size () != cloud.size ())
- {
+ pcl::extractLabeledEuclideanClusters<PointT>(cloud,
+ tree,
+ tolerance,
+ labeled_clusters,
+ min_pts_per_cluster,
+ max_pts_per_cluster);
+}
+
+template <typename PointT>
+void
+pcl::extractLabeledEuclideanClusters(
+ const PointCloud<PointT>& cloud,
+ const typename search::Search<PointT>::Ptr& tree,
+ float tolerance,
+ std::vector<std::vector<PointIndices>>& labeled_clusters,
+ unsigned int min_pts_per_cluster,
+ unsigned int max_pts_per_cluster)
+{
+ if (tree->getInputCloud()->size() != cloud.size()) {
PCL_ERROR("[pcl::extractLabeledEuclideanClusters] Tree built for a different point "
- "cloud dataset (%zu) than the input cloud (%zu)!\n",
- static_cast<std::size_t>(tree->getInputCloud()->size()),
- static_cast<std::size_t>(cloud.size()));
+ "cloud dataset (%lu) than the input cloud (%lu)!\n",
+ tree->getInputCloud()->size(),
+ cloud.size());
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.size (), false);
+ std::vector<bool> processed(cloud.size(), false);
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (int i = 0; i < static_cast<int> (cloud.size ()); ++i)
- {
+ for (index_t i = 0; i < static_cast<index_t>(cloud.size()); ++i) {
if (processed[i])
continue;
- std::vector<int> seed_queue;
+ Indices seed_queue;
int sq_idx = 0;
- seed_queue.push_back (i);
+ seed_queue.push_back(i);
processed[i] = true;
- while (sq_idx < static_cast<int> (seed_queue.size ()))
- {
+ while (sq_idx < static_cast<int>(seed_queue.size())) {
// Search for sq_idx
- int ret = tree->radiusSearch (seed_queue[sq_idx], tolerance, nn_indices, nn_distances, std::numeric_limits<int>::max());
- if(ret == -1)
+ int ret = tree->radiusSearch(seed_queue[sq_idx],
+ tolerance,
+ nn_indices,
+ nn_distances,
+ std::numeric_limits<int>::max());
+ if (ret == -1)
PCL_ERROR("radiusSearch on tree came back with error -1");
- if (!ret)
- {
+ if (!ret) {
sq_idx++;
continue;
}
- for (std::size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
+ for (std::size_t j = 1; j < nn_indices.size();
+ ++j) // nn_indices[0] should be sq_idx
{
- if (processed[nn_indices[j]]) // Has this point been processed before ?
+ if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
- if (cloud[i].label == cloud[nn_indices[j]].label)
- {
+ if (cloud[i].label == cloud[nn_indices[j]].label) {
// Perform a simple Euclidean clustering
- seed_queue.push_back (nn_indices[j]);
+ seed_queue.push_back(nn_indices[j]);
processed[nn_indices[j]] = true;
}
}
}
// If this queue is satisfactory, add to the clusters
- if (seed_queue.size () >= min_pts_per_cluster && seed_queue.size () <= max_pts_per_cluster)
- {
+ if (seed_queue.size() >= min_pts_per_cluster &&
+ seed_queue.size() <= max_pts_per_cluster) {
pcl::PointIndices r;
- r.indices.resize (seed_queue.size ());
- for (std::size_t j = 0; j < seed_queue.size (); ++j)
+ r.indices.resize(seed_queue.size());
+ for (std::size_t j = 0; j < seed_queue.size(); ++j)
r.indices[j] = seed_queue[j];
- std::sort (r.indices.begin (), r.indices.end ());
- r.indices.erase (std::unique (r.indices.begin (), r.indices.end ()), r.indices.end ());
+ std::sort(r.indices.begin(), r.indices.end());
+ r.indices.erase(std::unique(r.indices.begin(), r.indices.end()), r.indices.end());
r.header = cloud.header;
- labeled_clusters[cloud[i].label].push_back (r); // We could avoid a copy by working directly in the vector
+ labeled_clusters[cloud[i].label].push_back(
+ r); // We could avoid a copy by working directly in the vector
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> void
-pcl::LabeledEuclideanClusterExtraction<PointT>::extract (std::vector<std::vector<PointIndices> > &labeled_clusters)
+template <typename PointT>
+void
+pcl::LabeledEuclideanClusterExtraction<PointT>::extract(
+ std::vector<std::vector<PointIndices>>& labeled_clusters)
{
- if (!initCompute () ||
- (input_ && input_->points.empty ()) ||
- (indices_ && indices_->empty ()))
- {
- labeled_clusters.clear ();
+ if (!initCompute() || (input_ && input_->empty()) ||
+ (indices_ && indices_->empty())) {
+ labeled_clusters.clear();
return;
}
// Initialize the spatial locator
- if (!tree_)
- {
- if (input_->isOrganized ())
- tree_.reset (new pcl::search::OrganizedNeighbor<PointT> ());
+ if (!tree_) {
+ if (input_->isOrganized())
+ tree_.reset(new pcl::search::OrganizedNeighbor<PointT>());
else
- tree_.reset (new pcl::search::KdTree<PointT> (false));
+ tree_.reset(new pcl::search::KdTree<PointT>(false));
}
// Send the input dataset to the spatial locator
- tree_->setInputCloud (input_);
- extractLabeledEuclideanClusters (*input_, tree_, static_cast<float> (cluster_tolerance_), labeled_clusters, min_pts_per_cluster_, max_pts_per_cluster_, max_label_);
+ tree_->setInputCloud(input_);
+ extractLabeledEuclideanClusters(*input_,
+ tree_,
+ static_cast<float>(cluster_tolerance_),
+ labeled_clusters,
+ min_pts_per_cluster_,
+ max_pts_per_cluster_);
// Sort the clusters based on their size (largest one first)
- for (auto &labeled_cluster : labeled_clusters)
- std::sort (labeled_cluster.rbegin (), labeled_cluster.rend (), comparePointClusters);
+ for (auto& labeled_cluster : labeled_clusters)
+ std::sort(labeled_cluster.rbegin(), labeled_cluster.rend(), comparePointClusters);
- deinitCompute ();
+ deinitCompute();
}
-#define PCL_INSTANTIATE_LabeledEuclideanClusterExtraction(T) template class PCL_EXPORTS pcl::LabeledEuclideanClusterExtraction<T>;
-#define PCL_INSTANTIATE_extractLabeledEuclideanClusters(T) template void PCL_EXPORTS pcl::extractLabeledEuclideanClusters<T>(const pcl::PointCloud<T> &, const typename pcl::search::Search<T>::Ptr &, float , std::vector<std::vector<pcl::PointIndices> > &, unsigned int, unsigned int, unsigned int);
-
-#endif // PCL_EXTRACT_CLUSTERS_IMPL_H_
+#define PCL_INSTANTIATE_LabeledEuclideanClusterExtraction(T) \
+ template class PCL_EXPORTS pcl::LabeledEuclideanClusterExtraction<T>;
+#define PCL_INSTANTIATE_extractLabeledEuclideanClusters_deprecated(T) \
+ template void PCL_EXPORTS pcl::extractLabeledEuclideanClusters<T>( \
+ const pcl::PointCloud<T>&, \
+ const typename pcl::search::Search<T>::Ptr&, \
+ float, \
+ std::vector<std::vector<pcl::PointIndices>>&, \
+ unsigned int, \
+ unsigned int, \
+ unsigned int);
+#define PCL_INSTANTIATE_extractLabeledEuclideanClusters(T) \
+ template void PCL_EXPORTS pcl::extractLabeledEuclideanClusters<T>( \
+ const pcl::PointCloud<T>&, \
+ const typename pcl::search::Search<T>::Ptr&, \
+ float, \
+ std::vector<std::vector<pcl::PointIndices>>&, \
+ unsigned int, \
+ unsigned int);
+
+#endif // PCL_EXTRACT_CLUSTERS_IMPL_H_
#define PCL_SEGMENTATION_IMPL_EXTRACT_POLYGONAL_PRISM_DATA_H_
#include <pcl/segmentation/extract_polygonal_prism_data.h>
+#include <pcl/sample_consensus/sac_model_plane.h> // for SampleConsensusModelPlane
#include <pcl/common/centroid.h>
#include <pcl/common/eigen.h>
k2 = (k0 + 2) % 3;
// Project the convex hull
pcl::PointCloud<PointT> xy_polygon;
- xy_polygon.points.resize (polygon.size ());
+ xy_polygon.resize (polygon.size ());
for (std::size_t i = 0; i < polygon.size (); ++i)
{
Eigen::Vector4f pt (polygon[i].x, polygon[i].y, polygon[i].z, 0);
k2 = (k0 + 2) % 3;
// Project the convex hull
pcl::PointCloud<PointT> polygon;
- polygon.points.resize (planar_hull_->size ());
+ polygon.resize (planar_hull_->size ());
for (std::size_t i = 0; i < planar_hull_->size (); ++i)
{
Eigen::Vector4f pt ((*planar_hull_)[i].x, (*planar_hull_)[i].y, (*planar_hull_)[i].z, 0);
#pragma once
#include <pcl/common/distances.h>
+#include <pcl/common/io.h> // for getFieldIndex
#include <pcl/common/point_tests.h> // for pcl::isFinite
#include <pcl/search/organized.h>
#include <pcl/search/kdtree.h>
using namespace pcl::segmentation::grabcut;
if (!pcl::PCLBase<PointT>::initCompute ())
{
- PCL_ERROR ("[pcl::GrabCut::initCompute ()] Init failed!");
+ PCL_ERROR ("[pcl::GrabCut::initCompute ()] Init failed!\n");
return (false);
}
if ((pcl::getFieldIndex<PointT> ("rgb", in_fields_) == -1) &&
(pcl::getFieldIndex<PointT> ("rgba", in_fields_) == -1))
{
- PCL_ERROR ("[pcl::GrabCut::initCompute ()] No RGB data available, aborting!");
+ PCL_ERROR ("[pcl::GrabCut::initCompute ()] No RGB data available, aborting!\n");
return (false);
}
std::fill (trimap_.begin (), trimap_.end (), TrimapBackground);
std::fill (hard_segmentation_.begin (), hard_segmentation_.end (), SegmentationBackground);
- for (const int &index : indices->indices)
+ for (const auto &index : indices->indices)
{
trimap_[index] = TrimapUnknown;
hard_segmentation_[index] = SegmentationForeground;
GrabCut<PointT>::setTrimap (const PointIndicesConstPtr &indices, segmentation::grabcut::TrimapValue t)
{
using namespace pcl::segmentation::grabcut;
- for (const int &index : indices->indices)
+ for (const auto &index : indices->indices)
trimap_[index] = t;
// Immediately set the hard segmentation as well so that the display will update.
if (t == TrimapForeground)
- for (const int &index : indices->indices)
+ for (const auto &index : indices->indices)
hard_segmentation_[index] = SegmentationForeground;
else
if (t == TrimapBackground)
- for (const int &index : indices->indices)
+ for (const auto &index : indices->indices)
hard_segmentation_[index] = SegmentationBackground;
}
const NLinks &n_link = n_links_[i_point];
if (n_link.nb_links > 0)
{
- int point_index = (*indices_) [i_point];
+ const auto point_index = (*indices_) [i_point];
std::vector<float>::const_iterator weights_it = n_link.weights.begin ();
for (auto indices_it = n_link.indices.cbegin (); indices_it != n_link.indices.cend (); ++indices_it, ++weights_it)
{
- if ((*indices_it != point_index) && (*indices_it > -1))
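+        // Skip the query point itself and neighbours marked UNAVAILABLE (no valid neighbour found)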
+ if ((*indices_it != point_index) && (*indices_it != UNAVAILABLE))
{
addEdge (graph_nodes_[i_point], graph_nodes_[*indices_it], *weights_it, *weights_it);
}
NLinks &n_link = n_links_[i_point];
if (n_link.nb_links > 0)
{
- int point_index = (*indices_) [i_point];
+ const auto point_index = (*indices_) [i_point];
auto dists_it = n_link.dists.cbegin ();
auto weights_it = n_link.weights.begin ();
for (auto indices_it = n_link.indices.cbegin (); indices_it != n_link.indices.cend (); ++indices_it, ++dists_it, ++weights_it)
for (int i_point = 0; i_point < number_of_indices; i_point++)
{
- int point_index = (*indices_)[i_point];
+ const auto point_index = (*indices_)[i_point];
const PointT& point = input_->points [point_index];
if (pcl::isFinite (point))
{
{
links.nb_links = found - 1;
links.weights.reserve (links.nb_links);
- for (std::vector<int>::const_iterator nn_it = links.indices.begin (); nn_it != links.indices.end (); ++nn_it)
+ for (const auto& nn_index : links.indices)
{
- if (*nn_it != point_index)
+ if (nn_index != point_index)
{
- float color_distance = squaredEuclideanDistance ((*image_)[point_index], (*image_)[*nn_it]);
+ float color_distance = squaredEuclideanDistance ((*image_)[point_index], (*image_)[nn_index]);
links.weights.push_back (color_distance);
result+= color_distance;
++edges;
#ifndef PCL_SEGMENTATION_MIN_CUT_SEGMENTATION_HPP_
#define PCL_SEGMENTATION_MIN_CUT_SEGMENTATION_HPP_
-#include <pcl/segmentation/boost.h>
+#include <boost/graph/boykov_kolmogorov_max_flow.hpp> // for boykov_kolmogorov_max_flow
#include <pcl/segmentation/min_cut_segmentation.h>
#include <pcl/search/search.h>
#include <pcl/search/kdtree.h>
-#include <cstdlib>
#include <cmath>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
source_ = vertices_[number_of_points];
sink_ = vertices_[number_of_points + 1];
- for (std::size_t i_point = 0; i_point < number_of_indices; i_point++)
+ for (const auto& point_index : (*indices_))
{
- index_t point_index = (*indices_)[i_point];
double source_weight = 0.0;
double sink_weight = 0.0;
calculateUnaryPotential (point_index, source_weight, sink_weight);
addEdge (point_index, static_cast<int> (sink_), sink_weight);
}
- std::vector<int> neighbours;
+ pcl::Indices neighbours;
std::vector<float> distances;
search_->setInputCloud (input_, indices_);
for (std::size_t i_point = 0; i_point < number_of_indices; i_point++)
initial_point[0] = (*input_)[point].x;
initial_point[1] = (*input_)[point].y;
- for (std::size_t i_point = 0; i_point < foreground_points_.size (); i_point++)
+ for (const auto& fg_point : foreground_points_)
{
double dist = 0.0;
- dist += (foreground_points_[i_point].x - initial_point[0]) * (foreground_points_[i_point].x - initial_point[0]);
- dist += (foreground_points_[i_point].y - initial_point[1]) * (foreground_points_[i_point].y - initial_point[1]);
+ dist += (fg_point.x - initial_point[0]) * (fg_point.x - initial_point[0]);
+ dist += (fg_point.y - initial_point[1]) * (fg_point.y - initial_point[1]);
if (min_dist_to_foreground > dist)
{
min_dist_to_foreground = dist;
if (background_points_.size () == 0)
return;
- for (int i_point = 0; i_point < background_points_.size (); i_point++)
+ for (const auto& bg_point : background_points_)
{
double dist = 0.0;
- dist += (background_points_[i_point].x - initial_point[0]) * (background_points_[i_point].x - initial_point[0]);
- dist += (background_points_[i_point].y - initial_point[1]) * (background_points_[i_point].y - initial_point[1]);
+ dist += (bg_point.x - initial_point[0]) * (bg_point.x - initial_point[0]);
+ dist += (bg_point.y - initial_point[1]) * (bg_point.y - initial_point[1]);
if (min_dist_to_background > dist)
{
min_dist_to_background = dist;
- closest_background_point[0] = background_points_[i_point].x;
- closest_background_point[1] = background_points_[i_point].y;
+ closest_background_point[0] = bg_point.x;
+ closest_background_point[1] = bg_point.y;
}
}
template <typename PointT> bool
pcl::MinCutSegmentation<PointT>::recalculateBinaryPotentials ()
{
- int number_of_points = static_cast<int> (indices_->size ());
-
VertexIterator vertex_iter;
VertexIterator vertex_end;
OutEdgeIterator edge_iter;
std::vector< std::set<VertexDescriptor> > edge_marker;
std::set<VertexDescriptor> out_edges_marker;
edge_marker.clear ();
- edge_marker.resize (number_of_points + 2, out_edges_marker);
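+  // +2 accounts for the source and sink vertices that follow the point vertices in the graph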
+ edge_marker.resize (indices_->size () + 2, out_edges_marker);
for (boost::tie (vertex_iter, vertex_end) = boost::vertices (*graph_); vertex_iter != vertex_end; vertex_iter++)
{
{
std::vector<int> labels;
labels.resize (input_->size (), 0);
- int number_of_indices = static_cast<int> (indices_->size ());
- for (int i_point = 0; i_point < number_of_indices; i_point++)
- labels[(*indices_)[i_point]] = 1;
+ for (const auto& i_point : (*indices_))
+ labels[i_point] = 1;
clusters_.clear ();
if (!clusters_.empty ())
{
- int num_of_pts_in_first_cluster = static_cast<int> (clusters_[0].indices.size ());
- int num_of_pts_in_second_cluster = static_cast<int> (clusters_[1].indices.size ());
- int number_of_points = num_of_pts_in_first_cluster + num_of_pts_in_second_cluster;
colored_cloud = (new pcl::PointCloud<pcl::PointXYZRGB>)->makeShared ();
unsigned char foreground_color[3] = {255, 255, 255};
unsigned char background_color[3] = {255, 0, 0};
- colored_cloud->width = number_of_points;
+ colored_cloud->width = (clusters_[0].indices.size () + clusters_[1].indices.size ());
colored_cloud->height = 1;
colored_cloud->is_dense = input_->is_dense;
pcl::PointXYZRGB point;
- for (int i_point = 0; i_point < num_of_pts_in_first_cluster; i_point++)
+ for (const auto& point_index : (clusters_[0].indices))
{
- index_t point_index = clusters_[0].indices[i_point];
point.x = *((*input_)[point_index].data);
point.y = *((*input_)[point_index].data + 1);
point.z = *((*input_)[point_index].data + 2);
colored_cloud->points.push_back (point);
}
- for (int i_point = 0; i_point < num_of_pts_in_second_cluster; i_point++)
+ for (const auto& point_index : (clusters_[1].indices))
{
- index_t point_index = clusters_[1].indices[i_point];
point.x = *((*input_)[point_index].data);
point.y = *((*input_)[point_index].data + 1);
point.z = *((*input_)[point_index].data + 2);
unsigned invalid_label = std::numeric_limits<unsigned>::max ();
PointLT invalid_pt;
invalid_pt.label = std::numeric_limits<unsigned>::max ();
- labels.points.resize (input_->size (), invalid_pt);
+ labels.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
std::size_t clust_id = 0;
#ifndef PCL_SEGMENTATION_IMPL_ORGANIZED_MULTI_PLANE_SEGMENTATION_H_
#define PCL_SEGMENTATION_IMPL_ORGANIZED_MULTI_PLANE_SEGMENTATION_H_
-#include <pcl/segmentation/boost.h>
#include <pcl/segmentation/organized_connected_component_segmentation.h>
#include <pcl/segmentation/organized_multi_plane_segmentation.h>
#include <pcl/common/centroid.h>
{
boundary_cloud.resize (0);
pcl::OrganizedConnectedComponentSegmentation<PointT,PointLT>::findLabeledRegionBoundary (inlier_indices[i].indices[0], labels, boundary_indices[i]);
- boundary_cloud.points.resize (boundary_indices[i].indices.size ());
+ boundary_cloud.resize (boundary_indices[i].indices.size ());
for (std::size_t j = 0; j < boundary_indices[i].indices.size (); j++)
boundary_cloud[j] = (*input_)[boundary_indices[i].indices[j]];
boundary_cloud.resize (0);
int max_inlier_idx = static_cast<int> (inlier_indices[i].indices.size ()) - 1;
pcl::OrganizedConnectedComponentSegmentation<PointT,PointLT>::findLabeledRegionBoundary (inlier_indices[i].indices[max_inlier_idx], labels, boundary_indices[i]);
- boundary_cloud.points.resize (boundary_indices[i].indices.size ());
+ boundary_cloud.resize (boundary_indices[i].indices.size ());
for (std::size_t j = 0; j < boundary_indices[i].indices.size (); j++)
boundary_cloud[j] = (*input_)[boundary_indices[i].indices[j]];
boundary_cloud.resize (0);
int max_inlier_idx = static_cast<int> (inlier_indices[i].indices.size ()) - 1;
pcl::OrganizedConnectedComponentSegmentation<PointT,PointLT>::findLabeledRegionBoundary (inlier_indices[i].indices[max_inlier_idx], labels, boundary_indices[i]);
- boundary_cloud.points.resize (boundary_indices[i].indices.size ());
+ boundary_cloud.resize (boundary_indices[i].indices.size ());
for (std::size_t j = 0; j < boundary_indices[i].indices.size (); j++)
boundary_cloud[j] = (*input_)[boundary_indices[i].indices[j]];
model_coefficients[i].values[3]);
Eigen::Vector3f vp (0.0, 0.0, 0.0);
- if (project_points_ && !boundary_cloud.points.empty ())
+ if (project_points_ && !boundary_cloud.empty ())
boundary_cloud = projectToPlaneFromViewpoint (boundary_cloud, model, centroid, vp);
regions[i] = PlanarRegion<PointT> (centroid,
#include <pcl/common/common.h>
#include <pcl/common/io.h>
#include <pcl/filters/morphological_filter.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/segmentation/progressive_morphological_filter.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::ProgressiveMorphologicalFilter<PointT>::extract (std::vector<int>& ground)
+pcl::ProgressiveMorphologicalFilter<PointT>::extract (Indices& ground)
{
bool segmentation_is_possible = initCompute ();
if (!segmentation_is_possible)
// Find indices of the points whose difference between the source and
// filtered point clouds is less than the current height threshold.
- std::vector<int> pt_indices;
+ Indices pt_indices;
for (std::size_t p_idx = 0; p_idx < ground.size (); ++p_idx)
{
float diff = (*cloud)[p_idx].z - (*cloud_f)[p_idx].z;
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
+#include <pcl/console/print.h> // for PCL_ERROR
#include <pcl/search/search.h>
#include <pcl/search/kdtree.h>
#include <queue>
-#include <list>
#include <cmath>
#include <ctime>
template <typename PointT, typename NormalT>
pcl::RegionGrowing<PointT, NormalT>::RegionGrowing () :
min_pts_per_cluster_ (1),
- max_pts_per_cluster_ (std::numeric_limits<int>::max ()),
+ max_pts_per_cluster_ (std::numeric_limits<pcl::uindex_t>::max ()),
smooth_mode_flag_ (true),
curvature_flag_ (true),
residual_flag_ (false),
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename NormalT> int
+template <typename PointT, typename NormalT> pcl::uindex_t
pcl::RegionGrowing<PointT, NormalT>::getMinClusterSize ()
{
return (min_pts_per_cluster_);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename NormalT> void
-pcl::RegionGrowing<PointT, NormalT>::setMinClusterSize (int min_cluster_size)
+pcl::RegionGrowing<PointT, NormalT>::setMinClusterSize (pcl::uindex_t min_cluster_size)
{
min_pts_per_cluster_ = min_cluster_size;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename NormalT> int
+template <typename PointT, typename NormalT> pcl::uindex_t
pcl::RegionGrowing<PointT, NormalT>::getMaxClusterSize ()
{
return (max_pts_per_cluster_);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename NormalT> void
-pcl::RegionGrowing<PointT, NormalT>::setMaxClusterSize (int max_cluster_size)
+pcl::RegionGrowing<PointT, NormalT>::setMaxClusterSize (pcl::uindex_t max_cluster_size)
{
max_pts_per_cluster_ = max_cluster_size;
}
clusters.resize (clusters_.size ());
std::vector<pcl::PointIndices>::iterator cluster_iter_input = clusters.begin ();
- for (std::vector<pcl::PointIndices>::const_iterator cluster_iter = clusters_.begin (); cluster_iter != clusters_.end (); ++cluster_iter)
+ for (const auto& cluster : clusters_)
{
- if ((static_cast<int> (cluster_iter->indices.size ()) >= min_pts_per_cluster_) &&
- (static_cast<int> (cluster_iter->indices.size ()) <= max_pts_per_cluster_))
+ if ((cluster.indices.size () >= min_pts_per_cluster_) &&
+ (cluster.indices.size () <= max_pts_per_cluster_))
{
- *cluster_iter_input = *cluster_iter;
+ *cluster_iter_input = cluster;
++cluster_iter_input;
}
}
pcl::RegionGrowing<PointT, NormalT>::findPointNeighbours ()
{
int point_number = static_cast<int> (indices_->size ());
- std::vector<int> neighbours;
+ pcl::Indices neighbours;
std::vector<float> distances;
point_neighbours_.resize (input_->size (), neighbours);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename NormalT> bool
-pcl::RegionGrowing<PointT, NormalT>::validatePoint (int initial_seed, int point, int nghbr, bool& is_a_seed) const
+pcl::RegionGrowing<PointT, NormalT>::validatePoint (pcl::index_t initial_seed, pcl::index_t point, pcl::index_t nghbr, bool& is_a_seed) const
{
is_a_seed = true;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename NormalT> void
-pcl::RegionGrowing<PointT, NormalT>::getSegmentFromPoint (int index, pcl::PointIndices& cluster)
+pcl::RegionGrowing<PointT, NormalT>::getSegmentFromPoint (pcl::index_t index, pcl::PointIndices& cluster)
{
cluster.indices.clear ();
// first of all we need to find out if this point belongs to cloud
bool point_was_found = false;
- int number_of_points = static_cast <int> (indices_->size ());
- for (int point = 0; point < number_of_points; point++)
- if ( (*indices_)[point] == index)
+ for (const auto& point : (*indices_))
+ if (point == index)
{
point_was_found = true;
break;
}
// if we have already made the segmentation, then find the segment
// to which this point belongs
- for (auto i_segment = clusters_.cbegin (); i_segment != clusters_.cend (); i_segment++)
+ for (const auto& i_segment : clusters_)
{
- bool segment_was_found = false;
- for (std::size_t i_point = 0; i_point < i_segment->indices.size (); i_point++)
- {
- if (i_segment->indices[i_point] == index)
- {
- segment_was_found = true;
- cluster.indices.clear ();
- cluster.indices.reserve (i_segment->indices.size ());
- std::copy (i_segment->indices.begin (), i_segment->indices.end (), std::back_inserter (cluster.indices));
- break;
- }
- }
- if (segment_was_found)
+ const auto it = std::find (i_segment.indices.cbegin (), i_segment.indices.cend (), index);
+ if (it != i_segment.indices.cend())
{
+ // if segment was found
+ cluster.indices.clear ();
+ cluster.indices.reserve (i_segment.indices.size ());
+ std::copy (i_segment.indices.begin (), i_segment.indices.end (), std::back_inserter (cluster.indices));
break;
}
}// next segment
}
int next_color = 0;
- for (auto i_segment = clusters_.cbegin (); i_segment != clusters_.cend (); i_segment++)
+ for (const auto& i_segment : clusters_)
{
- for (auto i_point = i_segment->indices.cbegin (); i_point != i_segment->indices.cend (); i_point++)
+ for (const auto& index : (i_segment.indices))
{
- int index;
- index = *i_point;
(*colored_cloud)[index].r = colors[3 * next_color];
(*colored_cloud)[index].g = colors[3 * next_color + 1];
(*colored_cloud)[index].b = colors[3 * next_color + 2];
}
int next_color = 0;
- for (auto i_segment = clusters_.cbegin (); i_segment != clusters_.cend (); i_segment++)
+ for (const auto& i_segment : clusters_)
{
- for (auto i_point = i_segment->indices.cbegin (); i_point != i_segment->indices.cend (); i_point++)
+ for (const auto& index : (i_segment.indices))
{
- int index = *i_point;
(*colored_cloud)[index].r = colors[3 * next_color];
(*colored_cloud)[index].g = colors[3 * next_color + 1];
(*colored_cloud)[index].b = colors[3 * next_color + 2];
#ifndef PCL_SEGMENTATION_REGION_GROWING_RGB_HPP_
#define PCL_SEGMENTATION_REGION_GROWING_RGB_HPP_
+#include <pcl/console/print.h> // for PCL_ERROR
#include <pcl/segmentation/region_growing_rgb.h>
#include <pcl/search/search.h>
#include <pcl/search/kdtree.h>
std::vector<pcl::PointIndices>::iterator cluster_iter = clusters_.begin ();
while (cluster_iter != clusters_.end ())
{
- if (static_cast<int> (cluster_iter->indices.size ()) < min_pts_per_cluster_ ||
- static_cast<int> (cluster_iter->indices.size ()) > max_pts_per_cluster_)
+ if (cluster_iter->indices.size () < min_pts_per_cluster_ ||
+ cluster_iter->indices.size () > max_pts_per_cluster_)
{
cluster_iter = clusters_.erase (cluster_iter);
}
pcl::RegionGrowingRGB<PointT, NormalT>::findPointNeighbours ()
{
int point_number = static_cast<int> (indices_->size ());
- std::vector<int> neighbours;
+ pcl::Indices neighbours;
std::vector<float> distances;
point_neighbours_.resize (input_->size (), neighbours);
template <typename PointT, typename NormalT> void
pcl::RegionGrowingRGB<PointT, NormalT>::findSegmentNeighbours ()
{
- std::vector<int> neighbours;
+ pcl::Indices neighbours;
std::vector<float> distances;
segment_neighbours_.resize (number_of_segments_, neighbours);
segment_distances_.resize (number_of_segments_, distances);
for (int i_seg = 0; i_seg < number_of_segments_; i_seg++)
{
- std::vector<int> nghbrs;
+ pcl::Indices nghbrs;
std::vector<float> dist;
findRegionsKNN (i_seg, region_neighbour_number_, nghbrs, dist);
segment_neighbours_[i_seg].swap (nghbrs);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT,typename NormalT> void
-pcl::RegionGrowingRGB<PointT, NormalT>::findRegionsKNN (int index, int nghbr_number, std::vector<int>& nghbrs, std::vector<float>& dist)
+pcl::RegionGrowingRGB<PointT, NormalT>::findRegionsKNN (pcl::index_t index, pcl::uindex_t nghbr_number, pcl::Indices& nghbrs, std::vector<float>& dist)
{
std::vector<float> distances;
float max_dist = std::numeric_limits<float>::max ();
distances.resize (clusters_.size (), max_dist);
- int number_of_points = num_pts_in_segment_[index];
+ const auto number_of_points = num_pts_in_segment_[index];
//loop through every point in this segment and check neighbours
- for (int i_point = 0; i_point < number_of_points; i_point++)
+ for (pcl::uindex_t i_point = 0; i_point < number_of_points; i_point++)
{
- int point_index = clusters_[index].indices[i_point];
- int number_of_neighbours = static_cast<int> (point_neighbours_[point_index].size ());
+ const auto point_index = clusters_[index].indices[i_point];
+ const auto number_of_neighbours = point_neighbours_[point_index].size ();
//loop through every neighbour of the current point, find out to which segment it belongs
    //and if it belongs to a neighbouring segment and is close enough, remember that segment and its distance
- for (int i_nghbr = 0; i_nghbr < number_of_neighbours; i_nghbr++)
+ for (std::size_t i_nghbr = 0; i_nghbr < number_of_neighbours; i_nghbr++)
{
// find segment
- int segment_index = -1;
- segment_index = point_labels_[ point_neighbours_[point_index][i_nghbr] ];
+ const pcl::index_t segment_index = point_labels_[ point_neighbours_[point_index][i_nghbr] ];
if ( segment_index != index )
{
if (distances[i_seg] < max_dist)
{
segment_neighbours.push (std::make_pair (distances[i_seg], i_seg) );
- if (int (segment_neighbours.size ()) > nghbr_number)
+ if (segment_neighbours.size () > nghbr_number)
segment_neighbours.pop ();
}
}
- int size = std::min<int> (static_cast<int> (segment_neighbours.size ()), nghbr_number);
+ const std::size_t size = std::min<std::size_t> (segment_neighbours.size (), static_cast<std::size_t>(nghbr_number));
nghbrs.resize (size, 0);
dist.resize (size, 0);
- int counter = 0;
+ pcl::uindex_t counter = 0;
while ( !segment_neighbours.empty () && counter < nghbr_number )
{
dist[counter] = segment_neighbours.top ().first;
template <typename PointT, typename NormalT> void
pcl::RegionGrowingRGB<PointT, NormalT>::applyRegionMergingAlgorithm ()
{
- int number_of_points = static_cast<int> (indices_->size ());
-
// calculate color of each segment
std::vector< std::vector<unsigned int> > segment_color;
std::vector<unsigned int> color;
color.resize (3, 0);
segment_color.resize (number_of_segments_, color);
- for (int i_point = 0; i_point < number_of_points; i_point++)
+ for (const auto& point_index : (*indices_))
{
- int point_index = (*indices_)[i_point];
int segment_index = point_labels_[point_index];
segment_color[segment_index][0] += (*input_)[point_index].r;
segment_color[segment_index][1] += (*input_)[point_index].g;
counter[index] += 1;
}
- std::vector< std::vector< std::pair<float, int> > > region_neighbours;
+ std::vector< std::vector< std::pair<float, pcl::index_t> > > region_neighbours;
findRegionNeighbours (region_neighbours, final_segments);
int final_segment_number = homogeneous_region_number;
for (int i_reg = 0; i_reg < homogeneous_region_number; i_reg++)
{
- if (static_cast<int> (num_pts_in_homogeneous_region[i_reg]) < min_pts_per_cluster_)
+ if (num_pts_in_homogeneous_region[i_reg] < min_pts_per_cluster_)
{
if ( region_neighbours[i_reg].empty () )
continue;
num_seg_in_homogeneous_region[i_reg] = 0;
final_segment_number -= 1;
- int nghbr_number = static_cast<int> (region_neighbours[reg_index].size ());
- for (int i_nghbr = 0; i_nghbr < nghbr_number; i_nghbr++)
+ for (auto& nghbr : region_neighbours[reg_index])
{
- if ( segment_labels_[ region_neighbours[reg_index][i_nghbr].second ] == reg_index )
+ if ( segment_labels_[ nghbr.second ] == reg_index )
{
- region_neighbours[reg_index][i_nghbr].first = std::numeric_limits<float>::max ();
- region_neighbours[reg_index][i_nghbr].second = 0;
+ nghbr.first = std::numeric_limits<float>::max ();
+ nghbr.second = 0;
}
}
- nghbr_number = static_cast<int> (region_neighbours[i_reg].size ());
- for (int i_nghbr = 0; i_nghbr < nghbr_number; i_nghbr++)
+ for (const auto& nghbr : region_neighbours[i_reg])
{
- if ( segment_labels_[ region_neighbours[i_reg][i_nghbr].second ] != reg_index )
+ if ( segment_labels_[ nghbr.second ] != reg_index )
{
- std::pair<float, int> pair;
- pair.first = region_neighbours[i_reg][i_nghbr].first;
- pair.second = region_neighbours[i_reg][i_nghbr].second;
- region_neighbours[reg_index].push_back (pair);
+ region_neighbours[reg_index].push_back (nghbr);
}
}
region_neighbours[i_reg].clear ();
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename NormalT> void
-pcl::RegionGrowingRGB<PointT, NormalT>::findRegionNeighbours (std::vector< std::vector< std::pair<float, int> > >& neighbours_out, std::vector< std::vector<int> >& regions_in)
+pcl::RegionGrowingRGB<PointT, NormalT>::findRegionNeighbours (std::vector< std::vector< std::pair<float, pcl::index_t> > >& neighbours_out, std::vector< std::vector<int> >& regions_in)
{
int region_number = static_cast<int> (regions_in.size ());
neighbours_out.clear ();
for (int i_reg = 0; i_reg < region_number; i_reg++)
{
- int segment_num = static_cast<int> (regions_in[i_reg].size ());
- neighbours_out[i_reg].reserve (segment_num * region_neighbour_number_);
- for (int i_seg = 0; i_seg < segment_num; i_seg++)
+ neighbours_out[i_reg].reserve (regions_in[i_reg].size () * region_neighbour_number_);
+ for (const auto& curr_segment : regions_in[i_reg])
{
- int curr_segment = regions_in[i_reg][i_seg];
- int nghbr_number = static_cast<int> (segment_neighbours_[curr_segment].size ());
- std::pair<float, int> pair;
- for (int i_nghbr = 0; i_nghbr < nghbr_number; i_nghbr++)
+ const std::size_t nghbr_number = segment_neighbours_[curr_segment].size ();
+ std::pair<float, pcl::index_t> pair;
+ for (std::size_t i_nghbr = 0; i_nghbr < nghbr_number; i_nghbr++)
{
- int segment_index = segment_neighbours_[curr_segment][i_nghbr];
+ const auto segment_index = segment_neighbours_[curr_segment][i_nghbr];
if ( segment_distances_[curr_segment][i_nghbr] == std::numeric_limits<float>::max () )
continue;
if (segment_labels_[segment_index] != i_reg)
std::vector<int> counter;
counter.resize (num_regions, 0);
- int point_number = static_cast<int> (indices_->size ());
- for (int i_point = 0; i_point < point_number; i_point++)
+ for (const auto& point_index : (*indices_))
{
- int point_index = (*indices_)[i_point];
int index = point_labels_[point_index];
index = segment_labels_[index];
clusters_[index].indices[ counter[index] ] = point_index;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename NormalT> bool
-pcl::RegionGrowingRGB<PointT, NormalT>::validatePoint (int initial_seed, int point, int nghbr, bool& is_a_seed) const
+pcl::RegionGrowingRGB<PointT, NormalT>::validatePoint (pcl::index_t initial_seed, pcl::index_t point, pcl::index_t nghbr, bool& is_a_seed) const
{
is_a_seed = true;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename NormalT> void
-pcl::RegionGrowingRGB<PointT, NormalT>::getSegmentFromPoint (int index, pcl::PointIndices& cluster)
+pcl::RegionGrowingRGB<PointT, NormalT>::getSegmentFromPoint (pcl::index_t index, pcl::PointIndices& cluster)
{
cluster.indices.clear ();
// first of all we need to find out if this point belongs to cloud
bool point_was_found = false;
- int number_of_points = static_cast <int> (indices_->size ());
- for (int point = 0; point < number_of_points; point++)
- if ( (*indices_)[point] == index)
+ for (const auto& point : (*indices_))
+ if (point == index)
{
point_was_found = true;
break;
}
// if we have already made the segmentation, then find the segment
// to which this point belongs
- for (auto i_segment = clusters_.cbegin (); i_segment != clusters_.cend (); i_segment++)
+ for (const auto& i_segment : clusters_)
{
- bool segment_was_found = false;
- for (std::size_t i_point = 0; i_point < i_segment->indices.size (); i_point++)
- {
- if (i_segment->indices[i_point] == index)
- {
- segment_was_found = true;
- cluster.indices.clear ();
- cluster.indices.reserve (i_segment->indices.size ());
- std::copy (i_segment->indices.begin (), i_segment->indices.end (), std::back_inserter (cluster.indices));
- break;
- }
- }
- if (segment_was_found)
+ const auto it = std::find (i_segment.indices.cbegin (), i_segment.indices.cend (), index);
+ if (it != i_segment.indices.cend())
{
+ // if segment was found
+ cluster.indices.clear ();
+ cluster.indices.reserve (i_segment.indices.size ());
+ std::copy (i_segment.indices.begin (), i_segment.indices.end (), std::back_inserter (cluster.indices));
break;
}
}// next segment
sac_->getInliers (inliers.indices);
// Get the model coefficients
- Eigen::VectorXf coeff;
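+  // Pre-sized to the number of coefficients of the selected model (getModelSize ())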
+ Eigen::VectorXf coeff (model_->getModelSize ());
sac_->getModelCoefficients (coeff);
// If the user needs optimized coefficients
if (optimize_coefficients_)
{
- Eigen::VectorXf coeff_refined;
+ Eigen::VectorXf coeff_refined (model_->getModelSize ());
model_->optimizeModelCoefficients (inliers.indices, coeff, coeff_refined);
model_coefficients.values.resize (coeff_refined.size ());
memcpy (&model_coefficients.values[0], &coeff_refined[0], coeff_refined.size () * sizeof (float));
#define PCL_SEGMENTATION_IMPL_SEEDED_HUE_SEGMENTATION_H_
#include <pcl/segmentation/seeded_hue_segmentation.h>
+#include <pcl/console/print.h> // for PCL_ERROR
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////////////////////////
void
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (const int &i : indices_in.indices)
+ for (const auto &i : indices_in.indices)
{
if (processed[i])
continue;
processed[i] = true;
- std::vector<int> seed_queue;
+ Indices seed_queue;
int sq_idx = 0;
seed_queue.push_back (i);
{
int ret = tree->radiusSearch (seed_queue[sq_idx], tolerance, nn_indices, nn_distances, std::numeric_limits<int>::max());
if(ret == -1)
- PCL_ERROR("[pcl::seededHueSegmentation] radiusSearch returned error code -1");
+ PCL_ERROR("[pcl::seededHueSegmentation] radiusSearch returned error code -1\n");
// Search for sq_idx
if (!ret)
{
sq_idx++;
}
// Copy the seed queue into the output indices
- for (const int &l : seed_queue)
+ for (const auto &l : seed_queue)
indices_out.indices.push_back(l);
}
  // This is purely aesthetic, it can be removed for speed purposes
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
- std::vector<int> nn_indices;
+ Indices nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (const int &i : indices_in.indices)
+ for (const auto &i : indices_in.indices)
{
if (processed[i])
continue;
processed[i] = true;
- std::vector<int> seed_queue;
+ Indices seed_queue;
int sq_idx = 0;
seed_queue.push_back (i);
{
int ret = tree->radiusSearch (seed_queue[sq_idx], tolerance, nn_indices, nn_distances, std::numeric_limits<int>::max());
if(ret == -1)
- PCL_ERROR("[pcl::seededHueSegmentation] radiusSearch returned error code -1");
+ PCL_ERROR("[pcl::seededHueSegmentation] radiusSearch returned error code -1\n");
// Search for sq_idx
if (!ret)
{
sq_idx++;
}
// Copy the seed queue into the output indices
- for (const int &l : seed_queue)
+ for (const auto &l : seed_queue)
indices_out.indices.push_back(l);
}
  // This is purely aesthetic, it can be removed for speed purposes
#include <pcl/common/io.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////
pcl::PointCloud<PointT> &output)
{
// We're interested in a single nearest neighbor only
- std::vector<int> nn_indices (1);
+ Indices nn_indices (1);
std::vector<float> nn_distances (1);
// The input cloud indices that do not have a neighbor in the target cloud
- std::vector<int> src_indices;
+ Indices src_indices;
// Iterate through the source data set
- for (int i = 0; i < static_cast<int> (src.size ()); ++i)
+ for (index_t i = 0; i < static_cast<index_t> (src.size ()); ++i)
{
    // Ignore invalid points in the input cloud
if (!isFinite (src[i]))
if (!initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
#define PCL_SEGMENTATION_SUPERVOXEL_CLUSTERING_HPP_
#include <pcl/segmentation/supervoxel_clustering.h>
+#include <pcl/common/io.h> // for copyPointCloud
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT>
//double t_prep = timer_.getTime ();
//std::cout << "Placing Seeds" << std::endl;
- std::vector<int> seed_indices;
+ Indices seed_indices;
selectInitialSupervoxelSeeds (seed_indices);
//std::cout << "Creating helpers "<<std::endl;
createSupervoxelHelpers (seed_indices);
{
VoxelData& new_voxel_data = (*leaf_itr)->getData ();
//For every point, get its neighbors, build an index vector, compute normal
- std::vector<int> indices;
+ Indices indices;
indices.reserve (81);
//Push this point
indices.push_back (new_voxel_data.idx_);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::SupervoxelClustering<PointT>::createSupervoxelHelpers (std::vector<int> &seed_indices)
+pcl::SupervoxelClustering<PointT>::createSupervoxelHelpers (Indices &seed_indices)
{
supervoxel_helpers_.clear ();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::SupervoxelClustering<PointT>::selectInitialSupervoxelSeeds (std::vector<int> &seed_indices)
+pcl::SupervoxelClustering<PointT>::selectInitialSupervoxelSeeds (Indices &seed_indices)
{
//TODO THIS IS BAD - SEEDING SHOULD BE BETTER
//TODO Switch to assigning leaves! Don't use Octree!
std::vector<int> seed_indices_orig;
seed_indices_orig.resize (num_seeds, 0);
seed_indices.clear ();
- std::vector<int> closest_index;
+ pcl::Indices closest_index;
std::vector<float> distance;
closest_index.resize(1,0);
distance.resize(1,0);
seed_indices_orig[i] = closest_index[0];
}
- std::vector<int> neighbors;
+ pcl::Indices neighbors;
std::vector<float> sqr_distances;
seed_indices.reserve (seed_indices_orig.size ());
float search_radius = 0.5f*seed_resolution_;
// This is 1/20th of the number of voxels which fit in a planar slice through search volume
// Area of planar slice / area of voxel side. (Note: This is smaller than the value mentioned in the original paper)
float min_points = 0.05f * (search_radius)*(search_radius) * 3.1415926536f / (resolution_*resolution_);
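+  // For example (purely illustrative values): with seed_resolution_ = 0.1 and resolution_ = 0.01,
+  // search_radius = 0.05 and min_points = 0.05 * 0.05 * 0.05 * pi / 0.0001 ≈ 3.9 voxels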
- for (const int &index_orig : seed_indices_orig)
+ for (const auto &index_orig : seed_indices_orig)
{
int num = voxel_kdtree_->radiusSearch (index_orig, search_radius , neighbors, sqr_distances);
- int min_index = index_orig;
if ( num > min_points)
{
- seed_indices.push_back (min_index);
+ seed_indices.push_back (index_orig);
}
}
sv_itr->removeAllLeaves ();
}
- std::vector<int> closest_index;
+ Indices closest_index;
std::vector<float> distance;
//Now go through each supervoxel, find voxel closest to its center, add it in
for (typename HelperListT::iterator sv_itr = supervoxel_helpers_.begin (); sv_itr != supervoxel_helpers_.end (); ++sv_itr)
for (auto leaf_itr = leaves_.cbegin (); leaf_itr != leaves_.cend (); ++leaf_itr)
{
VoxelData& voxel_data = (*leaf_itr)->getData ();
- std::vector<int> indices;
+ Indices indices;
indices.reserve (81);
//Push this point
indices.push_back (voxel_data.idx_);
// find the 'label' field index
std::vector <pcl::PCLPointField> fields;
int label_idx = -1;
- pcl::PointCloud <PointT> point;
label_idx = pcl::getFieldIndex<PointT> ("label", fields);
if (label_idx != -1)
{
- for (std::size_t i = 0; i < in->size (); i++)
+ for (const auto& point : (*in))
{
// get the 'label' field
std::uint32_t label;
- memcpy (&label, reinterpret_cast<char*> (&(*in)[i]) + fields[label_idx].offset, sizeof(std::uint32_t));
+ memcpy (&label, reinterpret_cast<const char*> (&point) + fields[label_idx].offset, sizeof(std::uint32_t));
if (static_cast<int> (label) == label_num)
{
- pcl::PointXYZ point;
+ pcl::PointXYZ tmp;
// X Y Z
- point.x = (*in)[i].x;
- point.y = (*in)[i].y;
- point.z = (*in)[i].z;
- out->points.push_back (point);
+ tmp.x = point.x;
+ tmp.y = point.y;
+ tmp.z = point.z;
+ out->push_back (tmp);
}
}
out->width = out->size ();
template <typename PointT> void
pcl::UnaryClassifier<PointT>::queryFeatureDistances (std::vector<pcl::PointCloud<pcl::FPFHSignature33>::Ptr> &trained_features,
pcl::PointCloud<pcl::FPFHSignature33>::Ptr query_features,
- std::vector<int> &indi,
+ pcl::Indices &indi,
std::vector<float> &dist)
{
// estimate the total number of row's needed
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::UnaryClassifier<PointT>::assignLabels (std::vector<int> &indi,
+pcl::UnaryClassifier<PointT>::assignLabels (pcl::Indices &indi,
std::vector<float> &dist,
int n_feature_means,
float feature_threshold,
computeFPFH (tmp_cloud, input_cloud_features, normal_radius_search_, fpfh_radius_search_);
// query the distances from the input data features to all trained features
- std::vector<int> indices;
+ Indices indices;
std::vector<float> distance;
queryFeatureDistances (trained_features_, input_cloud_features, indices, distance);
#pragma once
-#include <pcl/pcl_base.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/segmentation/supervoxel_clustering.h>
#pragma once
-#include <pcl/segmentation/boost.h>
#include <pcl/memory.h>
#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
#include <pcl/search/search.h>
#include <string>
#include <set>
+#include <boost/graph/adjacency_list.hpp> // for adjacency_list
namespace pcl
{
PointCloudLPtr& labels,
std::vector<pcl::PointIndices>& label_indices);
- /** \brief Perform a refinement of an initial segmentation, by comparing points to adjacent regions detected by the initial segmentation.
- * \param [in] model_coefficients The list of segmented model coefficients
- * \param [in] inlier_indices The list of segmented inlier indices, corresponding to each model
- * \param [in] centroids The list of centroids corresponding to each segmented plane
- * \param [in] covariances The list of covariances corresponding to each segemented plane
- * \param [in] labels The labels produced by the initial segmentation
- * \param [in] label_indices The list of indices corresponding to each label
- */
- PCL_DEPRECATED(1, 12, "centroids and covariances parameters are not used; they are deprecated and will be removed in future releases")
- void
- refine (std::vector<ModelCoefficients>& model_coefficients,
- std::vector<PointIndices>& inlier_indices,
- std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& centroids,
- std::vector <Eigen::Matrix3f, Eigen::aligned_allocator<Eigen::Matrix3f> >& covariances,
- PointCloudLPtr& labels,
- std::vector<pcl::PointIndices>& label_indices)
- {
- pcl::utils::ignore(centroids, covariances);
- refine(model_coefficients, inlier_indices, labels, label_indices);
- }
-
protected:
/** \brief A pointer to the input normals */
#pragma once
#include <pcl/pcl_base.h>
-#include <pcl/search/search.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
* \param[out] ground indices of points determined to be ground returns.
*/
virtual void
- extract (std::vector<int>& ground);
+ extract (Indices& ground);
protected:
#include <boost/graph/graph_concepts.hpp>
#include <boost/concept/assert.hpp>
-#include <Eigen/Dense>
+#include <Eigen/Core> // for Matrix
namespace pcl
{
#pragma once
#include <Eigen/Core>
-#include <vector>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/search/search.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <list>
-#include <cmath>
-#include <ctime>
namespace pcl
{
* "Segmentation of point clouds using smoothness constraint"
    * by T. Rabbani, F. A. van den Heuvel, G. Vosselman.
* In addition to residual test, the possibility to test curvature is added.
+ * \ingroup segmentation
*/
template <typename PointT, typename NormalT>
class PCL_EXPORTS RegionGrowing : public pcl::PCLBase<PointT>
~RegionGrowing ();
/** \brief Get the minimum number of points that a cluster needs to contain in order to be considered valid. */
- int
+ pcl::uindex_t
getMinClusterSize ();
/** \brief Set the minimum number of points that a cluster needs to contain in order to be considered valid. */
void
- setMinClusterSize (int min_cluster_size);
+ setMinClusterSize (pcl::uindex_t min_cluster_size);
/** \brief Get the maximum number of points that a cluster needs to contain in order to be considered valid. */
- int
+ pcl::uindex_t
getMaxClusterSize ();
/** \brief Set the maximum number of points that a cluster needs to contain in order to be considered valid. */
void
- setMaxClusterSize (int max_cluster_size);
+ setMaxClusterSize (pcl::uindex_t max_cluster_size);
      /** \brief Returns the flag value. This flag signals which mode of the algorithm will be used.
        * If it is set to true then it will work as described in the article. This means that
* \param[out] cluster cluster to which the point belongs.
*/
virtual void
- getSegmentFromPoint (int index, pcl::PointIndices& cluster);
+ getSegmentFromPoint (pcl::index_t index, pcl::PointIndices& cluster);
      /** \brief If the cloud was successfully segmented, then the function
        * returns a colored cloud. Otherwise it returns an empty pointer.
* \param[out] is_a_seed this value is set to true if the point with index 'nghbr' can serve as the seed
*/
virtual bool
- validatePoint (int initial_seed, int point, int nghbr, bool& is_a_seed) const;
+ validatePoint (pcl::index_t initial_seed, pcl::index_t point, pcl::index_t nghbr, bool& is_a_seed) const;
/** \brief This function simply assembles the regions from list of point labels.
* Each cluster is an array of point indices.
protected:
/** \brief Stores the minimum number of points that a cluster needs to contain in order to be considered valid. */
- int min_pts_per_cluster_;
+ pcl::uindex_t min_pts_per_cluster_;
/** \brief Stores the maximum number of points that a cluster needs to contain in order to be considered valid. */
- int max_pts_per_cluster_;
+ pcl::uindex_t max_pts_per_cluster_;
      /** \brief Flag that signals whether the smoothness constraint will be used. */
bool smooth_mode_flag_;
NormalPtr normals_;
/** \brief Contains neighbours of each point. */
- std::vector<std::vector<int> > point_neighbours_;
+ std::vector<pcl::Indices> point_neighbours_;
      /** \brief Point labels that tell to which segment each point belongs. */
std::vector<int> point_labels_;
bool normal_flag_;
      /** \brief Tells how many points each segment contains. Used for reserving memory. */
- std::vector<int> num_pts_in_segment_;
+ std::vector<pcl::uindex_t> num_pts_in_segment_;
/** \brief After the segmentation it will contain the segments. */
std::vector <pcl::PointIndices> clusters_;
* Description can be found in the article
* "Color-based segmentation of point clouds"
* by Qingming Zhan, Yubin Liang, Yinghui Xiao
+ * \ingroup segmentation
*/
template <typename PointT, typename NormalT = pcl::Normal>
class PCL_EXPORTS RegionGrowingRGB : public RegionGrowing<PointT, NormalT>
        * \param[out] cluster cluster to which the point belongs.
*/
void
- getSegmentFromPoint (int index, pcl::PointIndices& cluster) override;
+ getSegmentFromPoint (index_t index, pcl::PointIndices& cluster) override;
protected:
* \param[out] dist the array of distances to the corresponding neighbours
*/
void
- findRegionsKNN (int index, int nghbr_number, std::vector<int>& nghbrs, std::vector<float>& dist);
+ findRegionsKNN (pcl::index_t index, pcl::uindex_t nghbr_number, Indices& nghbrs, std::vector<float>& dist);
/** \brief This function implements the merging algorithm described in the article
* "Color-based segmentation of point clouds"
* to the corresponding homogeneous region.
*/
void
- findRegionNeighbours (std::vector< std::vector< std::pair<float, int> > >& neighbours_out, std::vector< std::vector<int> >& regions_in);
+ findRegionNeighbours (std::vector< std::vector< std::pair<float, pcl::index_t> > >& neighbours_out, std::vector< std::vector<int> >& regions_in);
/** \brief This function simply assembles the regions from list of point labels.
* \param[in] num_pts_in_region for each final region it stores the corresponding number of points in it
* \param[out] is_a_seed this value is set to true if the point with index 'nghbr' can serve as the seed
*/
bool
- validatePoint (int initial_seed, int point, int nghbr, bool& is_a_seed) const override;
+ validatePoint (index_t initial_seed, index_t point, index_t nghbr, bool& is_a_seed) const override;
protected:
std::vector< std::vector<float> > point_distances_;
      /** \brief Stores the neighbours for the corresponding segments. */
- std::vector< std::vector<int> > segment_neighbours_;
+ std::vector< pcl::Indices > segment_neighbours_;
/** \brief Stores distances for the segment neighbours from segment_neighbours_ */
std::vector< std::vector<float> > segment_distances_;
#pragma once
-#include <pcl/segmentation/boost.h>
#include <pcl/segmentation/plane_coefficient_comparator.h>
namespace pcl
#include <pcl/pcl_base.h>
#include <pcl/point_types_conversion.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search
namespace pcl
{
#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search
namespace pcl
{
const typename pcl::search::Search<PointT>::Ptr &tree,
pcl::PointCloud<PointT> &output);
- template <typename PointT>
- PCL_DEPRECATED(1, 12, "tgt parameter is not used; it is deprecated and will be removed in future releases")
- inline void getPointCloudDifference (
- const pcl::PointCloud<PointT> &src,
- const pcl::PointCloud<PointT> & /* tgt */,
- double threshold,
- const typename pcl::search::Search<PointT>::Ptr &tree,
- pcl::PointCloud<PointT> &output)
- {
- getPointCloudDifference<PointT> (src, threshold, tree, output);
- }
-
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
#include <pcl/octree/octree_search.h>
#include <pcl/octree/octree_pointcloud_adjacency.h>
#include <pcl/search/search.h>
-#include <pcl/segmentation/boost.h>
+#include <boost/ptr_container/ptr_list.hpp> // for ptr_list
*/
SupervoxelClustering (float voxel_resolution, float seed_resolution);
- PCL_DEPRECATED(1, 12, "constructor with flag for using the single camera transform is deprecated. Default behavior is now to use the transform for organized clouds, and not use it for unorganized. Use setUseSingleCameraTransform() to override the defaults.")
- SupervoxelClustering (float voxel_resolution, float seed_resolution, bool) : SupervoxelClustering (voxel_resolution, seed_resolution) { }
-
/** \brief This destructor destroys the cloud, normals and search method used for
* finding neighbors. In other words it frees memory.
*/
refineSupervoxels (int num_itr, std::map<std::uint32_t,typename Supervoxel<PointT>::Ptr > &supervoxel_clusters);
////////////////////////////////////////////////////////////
- /** \brief Returns an RGB colorized cloud showing superpixels
- * Otherwise it returns an empty pointer.
- * Points that belong to the same supervoxel have the same color.
- * But this function doesn't guarantee that different segments will have different
- * color(it's random). Points that are unlabeled will be black
- * \note This will expand the label_colors_ vector so that it can accommodate all labels
- */
- PCL_DEPRECATED(1, 12, "use getLabeledCloud() instead. An example of how to display and save with colorized labels can be found in examples/segmentation/example_supervoxels.cpp")
- typename pcl::PointCloud<PointXYZRGBA>::Ptr
- getColoredCloud () const
- {
- return pcl::PointCloud<PointXYZRGBA>::Ptr (new pcl::PointCloud<PointXYZRGBA>);
- }
-
/** \brief Returns a deep copy of the voxel centroid cloud */
typename pcl::PointCloud<PointT>::Ptr
getVoxelCentroidCloud () const;
* \param[out] seed_indices The selected leaf indices
*/
void
- selectInitialSupervoxelSeeds (std::vector<int> &seed_indices);
+ selectInitialSupervoxelSeeds (Indices &seed_indices);
/** \brief This method creates the internal supervoxel helpers based on the provided seed points
* \param[in] seed_indices Indices of the leaves to use as seeds
*/
void
- createSupervoxelHelpers (std::vector<int> &seed_indices);
+ createSupervoxelHelpers (Indices &seed_indices);
/** \brief This performs the superpixel evolution */
void
#include <pcl/features/fpfh.h>
#include <pcl/features/normal_3d.h>
-#include <pcl/filters/filter_indices.h>
-#include <pcl/segmentation/extract_clusters.h>
-
#include <pcl/ml/kmeans.h>
namespace pcl
void
queryFeatureDistances (std::vector<pcl::PointCloud<pcl::FPFHSignature33>::Ptr> &trained_features,
pcl::PointCloud<pcl::FPFHSignature33>::Ptr query_features,
- std::vector<int> &indi,
+ pcl::Indices &indi,
std::vector<float> &dist);
void
- assignLabels (std::vector<int> &indi,
+ assignLabels (pcl::Indices &indi,
std::vector<float> &dist,
int n_feature_means,
float feature_threshold,
*/
#include <pcl/impl/instantiate.hpp>
-#include <pcl/point_types.h>
#include <pcl/segmentation/impl/extract_clusters.hpp>
#include <pcl/segmentation/impl/extract_labeled_clusters.hpp>
+#include <pcl/point_types.h>
// Instantiations of specific point types
#ifdef PCL_ONLY_CORE_POINT_TYPES
- PCL_INSTANTIATE(EuclideanClusterExtraction, (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA)(pcl::PointXYZRGB))
- PCL_INSTANTIATE(extractEuclideanClusters, (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA)(pcl::PointXYZRGB))
- PCL_INSTANTIATE(extractEuclideanClusters_indices, (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA)(pcl::PointXYZRGB))
+PCL_INSTANTIATE(EuclideanClusterExtraction,
+ (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA)(pcl::PointXYZRGB))
+PCL_INSTANTIATE(extractEuclideanClusters,
+ (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA)(pcl::PointXYZRGB))
+PCL_INSTANTIATE(extractEuclideanClusters_indices,
+ (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA)(pcl::PointXYZRGB))
#else
- PCL_INSTANTIATE(EuclideanClusterExtraction, PCL_XYZ_POINT_TYPES)
- PCL_INSTANTIATE(extractEuclideanClusters, PCL_XYZ_POINT_TYPES)
- PCL_INSTANTIATE(extractEuclideanClusters_indices, PCL_XYZ_POINT_TYPES)
+PCL_INSTANTIATE(EuclideanClusterExtraction, PCL_XYZ_POINT_TYPES)
+PCL_INSTANTIATE(extractEuclideanClusters, PCL_XYZ_POINT_TYPES)
+PCL_INSTANTIATE(extractEuclideanClusters_indices, PCL_XYZ_POINT_TYPES)
#endif
PCL_INSTANTIATE(LabeledEuclideanClusterExtraction, PCL_XYZL_POINT_TYPES)
+PCL_INSTANTIATE(extractLabeledEuclideanClusters_deprecated, PCL_XYZL_POINT_TYPES)
PCL_INSTANTIATE(extractLabeledEuclideanClusters, PCL_XYZL_POINT_TYPES)
void
pcl::segmentation::grabcut::buildGMMs (const Image& image,
- const std::vector<int>& indices,
+ const Indices& indices,
const std::vector<SegmentationValue>& hard_segmentation,
std::vector<std::size_t>& components,
GMM& background_GMM, GMM& foreground_GMM)
void
pcl::segmentation::grabcut::learnGMMs (const Image& image,
- const std::vector<int>& indices,
+ const Indices& indices,
const std::vector<SegmentationValue>& hard_segmentation,
std::vector<std::size_t>& components,
GMM& background_GMM, GMM& foreground_GMM)
set(build FALSE)
find_package(OpenGL)
-if(APPLE)
- # homebrew's FindGLEW module is not in good shape
- find_package(glew CONFIG)
-ELSE()
- find_package(GLEW)
-ENDIF()
+
+find_package(GLEW)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} EXT_DEPS opengl glew)
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <Eigen/Dense>
+#include <Eigen/Geometry> // for Isometry3d
#include <Eigen/StdVector>
namespace pcl {
#pragma once
-#include <pcl/io/pcd_io.h>
#include <pcl/simulation/glsl_shader.h>
#include <pcl/PolygonMesh.h>
#include <pcl/memory.h>
#include <pcl/pcl_config.h>
#include <pcl/pcl_macros.h>
+#include <pcl/point_cloud.h> // for PointCloud
#include <pcl/point_types.h>
#if defined(_WIN32) && !defined(APIENTRY) && !defined(__CYGWIN__)
#pragma once
-#include <pcl/common/transforms.h>
#include <pcl/range_image/range_image_planar.h>
#include <pcl/simulation/camera.h>
#include <pcl/simulation/glsl_shader.h>
#include <pcl/simulation/camera.h>
-#include <iostream>
-
using namespace Eigen;
using namespace pcl::simulation;
#include <pcl/simulation/model.h>
+#include <pcl/conversions.h> // for fromPCLPointCloud2
using namespace pcl::simulation;
pcl::simulation::TriangleMeshModel::TriangleMeshModel(pcl::PolygonMesh::Ptr plg)
Eigen::Vector4f tmp;
for (const auto& polygon : plg->polygons) {
- for (const unsigned int& point : polygon.vertices) {
+ for (const auto& point : polygon.vertices) {
tmp = newcloud[point].getVector4fMap();
vertices.push_back(Vertex(Eigen::Vector3f(tmp(0), tmp(1), tmp(2)),
Eigen::Vector3f(newcloud[point].r / 255.0f,
pcl::fromPCLPointCloud2(plg->cloud, newcloud);
Eigen::Vector4f tmp;
for (const auto& polygon : plg->polygons) {
- for (const unsigned int& point : polygon.vertices) {
+ for (const auto& point : polygon.vertices) {
tmp = newcloud[point].getVector4fMap();
vertices.push_back(Vertex(Eigen::Vector3f(tmp(0), tmp(1), tmp(2)),
Eigen::Vector3f(1.0, 1.0, 1.0)));
#include <pcl/common/time.h>
+#include <pcl/common/transforms.h> // for transformPointCloud
#include <pcl/simulation/range_likelihood.h>
#include <pcl/pcl_config.h>
#include <GL/glu.h>
#endif
-#include <ctime>
#include <random>
// For adding noise:
*/
#include <pcl/common/time.h> // for getTime
+#include <pcl/io/pcd_io.h> // for PCDWriter
#include <pcl/memory.h>
-#include <Eigen/Dense>
-
#include "simulation_io.hpp"
#include <cmath>
*
*/
-#include <pcl/common/common.h>
-#include <pcl/common/transforms.h>
-#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/console/time.h>
-#include <pcl/features/normal_3d.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/vtk_lib_io.h>
#include <pcl/simulation/camera.h>
#include <pcl/simulation/model.h>
#include <pcl/simulation/range_likelihood.h>
#include <pcl/simulation/scene.h>
-#include <pcl/surface/gp3.h>
#include <pcl/memory.h>
#include <pcl/pcl_config.h>
-#include <pcl/point_types.h>
-
-#include <Eigen/Dense>
#include <GL/glew.h>
*/
#include <pcl/common/common.h>
-#include <pcl/common/transforms.h>
-#include <pcl/console/parse.h>
#include <pcl/console/print.h>
-#include <pcl/features/normal_3d.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/vtk_lib_io.h>
#include <pcl/range_image/range_image_planar.h> // RangeImage
#include <pcl/simulation/model.h>
#include <pcl/simulation/range_likelihood.h>
#include <pcl/simulation/scene.h>
-#include <pcl/surface/gp3.h>
#include <pcl/visualization/cloud_viewer.h> // Pop-up viewer
#include <pcl/memory.h>
#include <pcl/pcl_config.h>
#include <pcl/point_types.h>
-#include <Eigen/Dense>
-
#include <GL/glew.h>
#ifdef OPENGL_IS_A_FRAMEWORK
#include <pcl/common/common.h>
#include <pcl/common/time.h> // for getTime
-#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/console/time.h>
-#include <pcl/features/normal_3d.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/vtk_lib_io.h>
#include <pcl/range_image/range_image_planar.h> // RangeImage
#include <pcl/simulation/model.h>
#include <pcl/simulation/range_likelihood.h>
#include <pcl/simulation/scene.h>
-#include <pcl/surface/gp3.h>
#include <pcl/visualization/cloud_viewer.h> // Pop-up viewer
#include <pcl/visualization/histogram_visualizer.h>
#include <pcl/visualization/keyboard_event.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/visualization/point_cloud_handlers.h>
#include <pcl/visualization/point_picking_event.h>
#include <pcl/memory.h>
#include <pcl/pcl_config.h>
#include <pcl/point_types.h>
-#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <vtkPolyDataReader.h>
}
// Else, if a single point has been selected
- std::stringstream ss;
- ss << event.getPointIndex();
+ std::string pointIndexStr = std::to_string(event.getPointIndex());
// Get the cloud's fields
for (std::size_t i = 0; i < cloud->fields.size(); ++i) {
if (!isMultiDimensionalFeatureField(cloud->fields[i]))
continue;
ph_global.addFeatureHistogram(
- *cloud, cloud->fields[i].name, event.getPointIndex(), ss.str());
+ *cloud, cloud->fields[i].name, event.getPointIndex(), pointIndexStr);
}
if (p) {
pcl::PointXYZ pos;
event.getPoint(pos.x, pos.y, pos.z);
- p->addText3D<pcl::PointXYZ>(ss.str(), pos, 0.0005, 1.0, 1.0, 1.0, ss.str());
+ p->addText3D<pcl::PointXYZ>(
+ pointIndexStr, pos, 0.0005, 1.0, 1.0, 1.0, pointIndexStr);
}
ph_global.spinOnce();
}
// Go through VTK files
for (std::size_t i = 0; i < vtk_file_indices.size(); ++i) {
+ const char* vtk_file = argv[vtk_file_indices[i]];
// Load file
tt.tic();
print_highlight(stderr, "Loading ");
- print_value(stderr, "%s ", argv[vtk_file_indices.at(i)]);
+ print_value(stderr, "%s ", vtk_file);
vtkPolyDataReader* reader = vtkPolyDataReader::New();
- reader->SetFileName(argv[vtk_file_indices.at(i)]);
+ reader->SetFileName(vtk_file);
reader->Update();
vtkSmartPointer<vtkPolyData> polydata = reader->GetOutput();
if (!polydata)
}
// Add as actor
- std::stringstream cloud_name("vtk-");
- cloud_name << argv[vtk_file_indices.at(i)] << "-" << i;
- p->addModelFromPolyData(polydata, cloud_name.str(), viewport);
+ const std::string cloud_name =
+ "vtk-" + std::string(vtk_file) + "-" + std::to_string(i);
+ p->addModelFromPolyData(polydata, cloud_name, viewport);
// Change the shape rendered color
if (fcolorparam && fcolor_r.size() > i && fcolor_g.size() > i &&
fcolor_r[i],
fcolor_g[i],
fcolor_b[i],
- cloud_name.str());
+ cloud_name);
// Change the shape rendered point size
if (!psize.empty())
p->setShapeRenderingProperties(
- pcl::visualization::PCL_VISUALIZER_POINT_SIZE, psize.at(i), cloud_name.str());
+ pcl::visualization::PCL_VISUALIZER_POINT_SIZE, psize.at(i), cloud_name);
// Change the shape rendered opacity
if (!opaque.empty())
p->setShapeRenderingProperties(
- pcl::visualization::PCL_VISUALIZER_OPACITY, opaque.at(i), cloud_name.str());
+ pcl::visualization::PCL_VISUALIZER_OPACITY, opaque.at(i), cloud_name);
}
pcl::PCLPointCloud2::Ptr cloud;
// Go through PCD files
for (std::size_t i = 0; i < p_file_indices.size(); ++i) {
+ const std::string p_file = argv[p_file_indices[i]];
cloud.reset(new pcl::PCLPointCloud2);
Eigen::Vector4f origin;
Eigen::Quaternionf orientation;
int version;
print_highlight(stderr, "Loading ");
- print_value(stderr, "%s ", argv[p_file_indices.at(i)]);
+ print_value(stderr, "%s ", p_file.c_str());
tt.tic();
if (pcd.read(argv[p_file_indices.at(i)], *cloud, origin, orientation, version) < 0)
return (-1);
- std::stringstream cloud_name;
-
// ---[ Special check for 1-point multi-dimension histograms
if (cloud->fields.size() == 1 && isMultiDimensionalFeatureField(cloud->fields[0])) {
- cloud_name << argv[p_file_indices.at(i)];
-
if (!ph)
ph.reset(new pcl::visualization::PCLHistogramVisualizer);
print_info("[done, ");
print_info(" points]\n");
pcl::getMinMax(*cloud, 0, cloud->fields[0].name, min_p, max_p);
- ph->addFeatureHistogram(*cloud, cloud->fields[0].name, cloud_name.str());
+ ph->addFeatureHistogram(*cloud, cloud->fields[0].name, p_file);
continue;
}
- cloud_name << argv[p_file_indices.at(i)] << "-" << i;
-
// Create the PCLVisualizer object here on the first encountered XYZ file
if (!p) {
p.reset(new pcl::visualization::PCLVisualizer(argc, argv, "PCD viewer"));
new pcl::visualization::PointCloudGeometryHandlerXYZ<pcl::PCLPointCloud2>(
cloud));
// Add the cloud to the renderer
- // p->addPointCloud<pcl::PointXYZ> (cloud_xyz, geometry_handler, color_handler,
- // cloud_name.str (), viewport);
+ const std::string cloud_name = p_file + "-" + std::to_string(i);
p->addPointCloud(cloud,
geometry_handler,
color_handler,
origin,
orientation,
- cloud_name.str(),
+ cloud_name,
viewport);
+ const std::string cloud_name_normals = cloud_name + "-normals";
// If normal lines are enabled
if (normals != 0) {
int normal_idx = pcl::getFieldIndex(*cloud, "normal_x");
pcl::PointCloud<pcl::Normal>::Ptr cloud_normals(new pcl::PointCloud<pcl::Normal>);
pcl::fromPCLPointCloud2(*cloud, *cloud_normals);
- std::stringstream cloud_name_normals;
- cloud_name_normals << argv[p_file_indices.at(i)] << "-" << i << "-normals";
p->addPointCloudNormals<pcl::PointXYZ, pcl::Normal>(cloud_xyz,
cloud_normals,
normals,
normals_scale,
- cloud_name_normals.str(),
+ cloud_name_normals,
viewport);
}
pcl::PointCloud<pcl::PrincipalCurvatures>::Ptr cloud_pc(
new pcl::PointCloud<pcl::PrincipalCurvatures>);
pcl::fromPCLPointCloud2(*cloud, *cloud_pc);
- std::stringstream cloud_name_normals_pc;
- cloud_name_normals_pc << argv[p_file_indices.at(i)] << "-" << i << "-normals";
int factor = (std::min)(normals, pc);
p->addPointCloudNormals<pcl::PointXYZ, pcl::Normal>(cloud_xyz,
cloud_normals,
factor,
normals_scale,
- cloud_name_normals_pc.str(),
+ cloud_name_normals,
viewport);
- p->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR,
- 1.0,
- 0.0,
- 0.0,
- cloud_name_normals_pc.str());
- p->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH,
- 3,
- cloud_name_normals_pc.str());
- cloud_name_normals_pc << "-pc";
+ p->setPointCloudRenderingProperties(
+ pcl::visualization::PCL_VISUALIZER_COLOR, 1.0, 0.0, 0.0, cloud_name_normals);
+ p->setPointCloudRenderingProperties(
+ pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 3, cloud_name_normals);
+ const auto cloud_name_normals_pc = cloud_name_normals + "-pc";
p->addPointCloudPrincipalCurvatures<pcl::PointXYZ, pcl::Normal>(
cloud_xyz,
cloud_normals,
cloud_pc,
factor,
pc_scale,
- cloud_name_normals_pc.str(),
+ cloud_name_normals_pc,
viewport);
- p->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH,
- 3,
- cloud_name_normals_pc.str());
+ p->setPointCloudRenderingProperties(
+ pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 3, cloud_name_normals_pc);
}
// Add every dimension as a possible color
pcl::PCLPointCloud2>(cloud, cloud->fields[f].name));
}
// Add the cloud to the renderer
- // p->addPointCloud<pcl::PointXYZ> (cloud_xyz, color_handler, cloud_name.str (),
- // viewport);
p->addPointCloud(
- cloud, color_handler, origin, orientation, cloud_name.str(), viewport);
+ cloud, color_handler, origin, orientation, cloud_name, viewport);
}
}
// Additionally, add normals as a handler
new pcl::visualization::PointCloudGeometryHandlerSurfaceNormal<
pcl::PCLPointCloud2>(cloud));
if (geometry_handler->isCapable())
- // p->addPointCloud<pcl::PointXYZ> (cloud_xyz, geometry_handler, cloud_name.str
- // (), viewport);
p->addPointCloud(
- cloud, geometry_handler, origin, orientation, cloud_name.str(), viewport);
+ cloud, geometry_handler, origin, orientation, cloud_name, viewport);
// Set immediate mode rendering ON
p->setPointCloudRenderingProperties(
- pcl::visualization::PCL_VISUALIZER_IMMEDIATE_RENDERING, 1.0, cloud_name.str());
+ pcl::visualization::PCL_VISUALIZER_IMMEDIATE_RENDERING, 1.0, cloud_name);
// Change the cloud rendered point size
if (!psize.empty())
p->setPointCloudRenderingProperties(
- pcl::visualization::PCL_VISUALIZER_POINT_SIZE, psize.at(i), cloud_name.str());
+ pcl::visualization::PCL_VISUALIZER_POINT_SIZE, psize.at(i), cloud_name);
// Change the cloud rendered opacity
if (!opaque.empty())
p->setPointCloudRenderingProperties(
- pcl::visualization::PCL_VISUALIZER_OPACITY, opaque.at(i), cloud_name.str());
-
- // Reset camera viewpoint to center of cloud if camera parameters were not passed
- // manually and this is the first loaded cloud
- // if (i == 0 && !p->cameraParamsSet ())
- // p->resetCameraViewpoint (cloud_name.str ());
+ pcl::visualization::PCL_VISUALIZER_OPACITY, opaque.at(i), cloud_name);
}
////////////////////////////////////////////////////////////////
#pragma once
-#include <pcl/io/pcd_io.h>
#include <pcl/io/vtk_lib_io.h>
#include <pcl/simulation/camera.h>
#include <pcl/simulation/range_likelihood.h>
set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
-PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS qhull)
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS qhull vtk)
PCL_ADD_DOC("${SUBSYS_NAME}")
set(BUILD_surface_on_nurbs 0 CACHE BOOL "Fitting NURBS to point-clouds using openNURBS")
if(BUILD_surface_on_nurbs)
+ # Add GCC-specific exemptions from -Werror, only for the 3rd-party OpenNURBS compilation
+ # -Wno-error = (deprecated-declarations, class-memaccess, deprecated-copy, uninitialized, parentheses)
+ # Note: the class-memaccess warning was introduced in GCC 8.0, see https://www.gnu.org/software/gcc/gcc-8/changes.html
+ # the deprecated-copy warning was introduced in GCC 9.1, see https://www.gnu.org/software/gcc/gcc-9/changes.html
+ # Since GCC 8 and GCC 9 are still widely used, compiler version checks are required for these two options.
+ if(CMAKE_COMPILER_IS_GNUCXX)
+ add_compile_options("-Wno-error=deprecated-declarations" "-Wno-error=uninitialized" "-Wno-error=parentheses")
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 8.0)
+ add_compile_options("-Wno-error=class-memaccess")
+ endif()
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 9.1)
+ add_compile_options("-Wno-error=deprecated-copy")
+ endif()
+ endif()
+
include(src/3rdparty/opennurbs/openNURBS.cmake)
include(src/on_nurbs/on_nurbs.cmake)
endif()
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
+
include_directories(
"${CMAKE_CURRENT_SOURCE_DIR}/include"
"${CMAKE_CURRENT_SOURCE_DIR}"
)
-include_directories(SYSTEM
- ${VTK_INCLUDE_DIRS}
-)
-link_directories(${VTK_LIBRARY_DIRS})
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs} ${VTK_SMOOTHING_INCLUDES} ${POISSON_INCLUDES} ${OPENNURBS_INCLUDES} ${ON_NURBS_INCLUDES})
-target_link_libraries("${LIB_NAME}" pcl_common pcl_search pcl_kdtree pcl_octree ${VTK_LIBRARIES} ${ON_NURBS_LIBRARIES})
+
+target_link_libraries("${LIB_NAME}" pcl_common pcl_search pcl_kdtree pcl_octree ${ON_NURBS_LIBRARIES})
+
+if(VTK_FOUND)
+ if(${VTK_VERSION} VERSION_LESS 9.0)
+ include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
+ link_directories(${VTK_LIBRARY_DIRS})
+ target_link_libraries("${LIB_NAME}" ${VTK_LIBRARIES})
+ else()
+ target_link_libraries("${LIB_NAME}" VTK::CommonDataModel
+ VTK::CommonExecutionModel
+ VTK::FiltersModeling
+ VTK::FiltersCore)
+ endif()
+endif()
+
if(QHULL_FOUND)
target_link_libraries("${LIB_NAME}" ${QHULL_LIBRARIES})
endif()
+
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
# Install include files
if(VTK_FOUND AND NOT ANDROID)
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/vtk_smoothing" ${VTK_SMOOTHING_INCLUDES})
endif()
+
if(WIN32)
- target_link_libraries("${LIB_NAME}" Rpcrt4.lib)
+ target_link_libraries("${LIB_NAME}" rpcrt4.lib)
endif()
#ifndef MARCHING_CUBES_INCLUDED
#define MARCHING_CUBES_INCLUDED
#include <pcl/pcl_macros.h>
-#include <vector>
#include "geometry.h"
normal[0] = (*input_)[cnt].normal_x;
normal[1] = (*input_)[cnt].normal_y;
normal[2] = (*input_)[cnt].normal_z;
+ cnt++;
for( i=0 ; i<DIMENSION ; i++ ) position[i] = ( position[i]-center[i] ) / scale;
myCenter[0] = myCenter[1] = myCenter[2] = Real(0.5);
d++;
}
NonLinearUpdateWeightContribution( temp , position , weight );
- cnt++;
}
}
*/
#include <stdlib.h>
-#include <math.h>
#include <algorithm>
#include "poisson_exceptions.h"
#include <float.h>
#include <math.h>
-#include <algorithm>
-
#include <cstdio>
#include <cstring>
#include "factor.h"
#include <cstdio>
+#include <cstdlib> // for malloc, needed by gcc-5
#include <cstring>
////////////////////////
DAMAGE.
*/
-#include <float.h>
#ifdef _WIN32
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
#if defined __GNUC__
# pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
#include <boost/dynamic_bitset/dynamic_bitset.hpp>
#include <pcl/pcl_config.h>
#ifdef HAVE_QHULL
-#include <pcl/surface/convex_hull.h>
+#include <pcl/surface/reconstruction.h> // for MeshConstruction
namespace pcl
{
// PCL includes
#include <pcl/surface/reconstruction.h>
-#include <pcl/ModelCoefficients.h>
#include <pcl/PolygonMesh.h>
namespace pcl
using PointCloudConstPtr = typename PointCloud::ConstPtr;
/** \brief Empty constructor. */
- ConvexHull () : compute_area_ (false), total_area_ (0), total_volume_ (0), dimension_ (0),
+ ConvexHull () : compute_area_ (false), total_area_ (0), total_volume_ (0), dimension_ (0),
projection_angle_thresh_ (std::cos (0.174532925) ), qhull_flags ("qhull "),
x_axis_ (1.0, 0.0, 0.0), y_axis_ (0.0, 1.0, 0.0), z_axis_ (0.0, 0.0, 1.0)
{
- };
+ }
/** \brief Empty destructor */
~ConvexHull () {}
* \param[in] vertices the vertices representing the polygon
*/
float
- area (const std::vector<std::uint32_t>& vertices);
+ area (const Indices& vertices);
+
+ /** \brief Compute the signed area of a polygon.
+ * \param[in] vertices the vertices representing the polygon
+ */
+ template <typename T = pcl::index_t, std::enable_if_t<!std::is_same<T, std::uint32_t>::value, pcl::index_t> = 0>
+ PCL_DEPRECATED(1, 14, "This method creates a useless copy of the vertices vector. Use area method which accepts Indices instead")
+ float
+ area (const std::vector<std::uint32_t>& vertices)
+ {
+ return area(Indices (vertices.cbegin(), vertices.cend()));
+ }
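The enable_if guard above keeps the legacy overload out of overload resolution when pcl::index_t is itself std::uint32_t, in which case it would collide with the new Indices overload. A stripped-down, hedged illustration of the same pattern (names here are hypothetical, not PCL API):

#include <cstdint>
#include <type_traits>
#include <vector>

template <typename IndexT>
struct PolygonOps {
  // New interface, indexed with the configurable index type.
  float area (const std::vector<IndexT>& vertices)
  { return static_cast<float> (vertices.size ()); }   // placeholder body

  // Legacy interface: instantiated only when it would not duplicate the new one.
  template <typename T = IndexT,
            std::enable_if_t<!std::is_same<T, std::uint32_t>::value, int> = 0>
  float area (const std::vector<std::uint32_t>& legacy)
  { return area (std::vector<IndexT> (legacy.cbegin (), legacy.cend ())); }
};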
/** \brief Check if the triangle (u,v,w) is an ear.
* \param[in] u the first triangle vertex
* \param[in] vertices a set of input vertices
*/
bool
- isEar (int u, int v, int w, const std::vector<std::uint32_t>& vertices);
+ isEar (int u, int v, int w, const Indices& vertices);
+
+ /** \brief Check if the triangle (u,v,w) is an ear.
+ * \param[in] u the first triangle vertex
+ * \param[in] v the second triangle vertex
+ * \param[in] w the third triangle vertex
+ * \param[in] vertices a set of input vertices
+ */
+ template <typename T = pcl::index_t, std::enable_if_t<!std::is_same<T, std::uint32_t>::value, pcl::index_t> = 0>
+ PCL_DEPRECATED(1, 14, "This method creates a useless copy of the vertices vector. Use isEar method which accepts Indices instead")
+ bool
+ isEar (int u, int v, int w, const std::vector<std::uint32_t>& vertices)
+ {
+ return isEar(u, v, w, Indices (vertices.cbegin(), vertices.cend()));
+ }
/** \brief Check if p is inside the triangle (u,v,w).
* \param[in] u the first triangle vertex
#if defined __GNUC__
# pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
#include <Eigen/SVD>
#include <fstream>
-
+#include <Eigen/Geometry> // for cross
namespace pcl
{
/** \brief Get the sfn list. */
- inline std::vector<int>
+ inline pcl::Indices
getSFN () const { return (sfn_); }
/** \brief Get the ffn list. */
- inline std::vector<int>
+ inline pcl::Indices
getFFN () const { return (ffn_); }
protected:
struct nnAngle
{
double angle;
- int index;
- int nnIndex;
+ pcl::index_t index;
+ pcl::index_t nnIndex;
bool visible;
};
/** \brief A list of angles to neighbors **/
std::vector<nnAngle> angles_;
/** \brief Index of the current query point **/
- int R_;
+ pcl::index_t R_;
/** \brief List of point states **/
std::vector<int> state_;
/** \brief List of sources **/
- std::vector<int> source_;
+ pcl::Indices source_;
/** \brief List of fringe neighbors in one direction **/
- std::vector<int> ffn_;
+ pcl::Indices ffn_;
/** \brief List of fringe neighbors in other direction **/
- std::vector<int> sfn_;
+ pcl::Indices sfn_;
/** \brief Connected component labels for each point **/
std::vector<int> part_;
/** \brief Points on the outer edge from which the mesh has to be grown **/
/** \brief Flag to set if the current point is free **/
bool is_current_free_;
/** \brief Current point's index **/
- int current_index_;
+ pcl::index_t current_index_;
/** \brief Flag to set if the previous point is the first fringe neighbor **/
bool prev_is_ffn_;
/** \brief Flag to set if the next point is the second fringe neighbor **/
/** \brief Flag to set if the second fringe neighbor was changed **/
bool changed_2nd_fn_;
/** \brief New boundary point **/
- int new2boundary_;
+ pcl::index_t new2boundary_;
/** \brief Flag to set if the next neighbor was already connected in the previous step.
* To avoid inconsistency it should not be connected again.
*/
void
connectPoint (std::vector<pcl::Vertices> &polygons,
- const int prev_index,
- const int next_index,
- const int next_next_index,
+ const pcl::index_t prev_index,
+ const pcl::index_t next_index,
+ const pcl::index_t next_next_index,
const Eigen::Vector2f &uvn_current,
const Eigen::Vector2f &uvn_prev,
const Eigen::Vector2f &uvn_next);
* \param[out] polygons the polygon mesh to be updated
*/
inline void
- addTriangle (int a, int b, int c, std::vector<pcl::Vertices> &polygons)
+ addTriangle (pcl::index_t a, pcl::index_t b, pcl::index_t c, std::vector<pcl::Vertices> &polygons)
{
triangle_.vertices.resize (3);
if (consistent_ordering_)
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/surface/boost.h>
#include <pcl/surface/reconstruction.h>
+#include <boost/dynamic_bitset/dynamic_bitset.hpp> // for dynamic_bitset
#include <unordered_map>
namespace pcl
{
Leaf () {}
- std::vector<int> data_indices;
+ pcl::Indices data_indices;
Eigen::Vector4f pt_on_surface;
Eigen::Vector3f vect_at_grid_pt;
};
* \param pt_union_indices the union of input data points within the cell and padding cells
*/
void
- getDataPtsUnion (const Eigen::Vector3i &index, std::vector <int> &pt_union_indices);
+ getDataPtsUnion (const Eigen::Vector3i &index, pcl::Indices &pt_union_indices);
/** \brief Given the index of a cell, examine its up, left, front edges, and add
* the vertices to the m_surface list. The up, left, front edges only share 4
* \param pt_union_indices the union of input data points within the cell and padding cells
*/
void
- createSurfaceForCell (const Eigen::Vector3i &index, std::vector <int> &pt_union_indices);
+ createSurfaceForCell (const Eigen::Vector3i &index, pcl::Indices &pt_union_indices);
/** \brief Given the coordinates of one point, project it onto the surface,
* \param projection the resultant point projected
*/
void
- getProjection (const Eigen::Vector4f &p, std::vector<int> &pt_union_indices, Eigen::Vector4f &projection);
+ getProjection (const Eigen::Vector4f &p, pcl::Indices &pt_union_indices, Eigen::Vector4f &projection);
/** \brief Given the coordinates of one point, project it onto the surface,
* return the projected point. Find the plane which fits all the points in
*/
void
getProjectionWithPlaneFit (const Eigen::Vector4f &p,
- std::vector<int> &pt_union_indices,
+ pcl::Indices &pt_union_indices,
Eigen::Vector4f &projection);
*/
void
getVectorAtPoint (const Eigen::Vector4f &p,
- std::vector <int> &pt_union_indices, Eigen::Vector3f &vo);
+ pcl::Indices &pt_union_indices, Eigen::Vector3f &vo);
/** \brief Given the location of a point, get its vector
* \param p the coordinates of the input point
*/
void
getVectorAtPointKNN (const Eigen::Vector4f &p,
- std::vector<int> &k_indices,
+ pcl::Indices &k_indices,
std::vector<float> &k_squared_distances,
Eigen::Vector3f &vo);
* \param pt_union_indices the union of input data points within the cell and padding cells
*/
double
- getMagAtPoint (const Eigen::Vector4f &p, const std::vector <int> &pt_union_indices);
+ getMagAtPoint (const Eigen::Vector4f &p, const pcl::Indices &pt_union_indices);
/** \brief Get the 1st derivative
* \param p the coordinate of the input point
*/
double
getD1AtPoint (const Eigen::Vector4f &p, const Eigen::Vector3f &vec,
- const std::vector <int> &pt_union_indices);
+ const pcl::Indices &pt_union_indices);
/** \brief Get the 2nd derivative
* \param p the coordinate of the input point
*/
double
getD2AtPoint (const Eigen::Vector4f &p, const Eigen::Vector3f &vec,
- const std::vector <int> &pt_union_indices);
+ const pcl::Indices &pt_union_indices);
/** \brief Test whether the edge is intersected by the surface by
* doing the dot product of the vector at two end points. Also test
bool
isIntersected (const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > &end_pts,
std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &vect_at_end_pts,
- std::vector <int> &pt_union_indices);
+ pcl::Indices &pt_union_indices);
/** \brief Find point where the edge intersects the surface.
* \param level binary search level
const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > &end_pts,
const std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &vect_at_end_pts,
const Eigen::Vector4f &start_pt,
- std::vector<int> &pt_union_indices,
+ pcl::Indices &pt_union_indices,
Eigen::Vector4f &intersection);
/** \brief Go through all the entries in the hash table and update the
*/
void
storeVectAndSurfacePoint (int index_1d, const Eigen::Vector3i &index_3d,
- std::vector<int> &pt_union_indices, const Leaf &cell_data);
+ pcl::Indices &pt_union_indices, const Leaf &cell_data);
/** \brief Go through all the entries in the hash table and update the cellData.
* When creating the hash table, the pt_on_surface field stores the center point
#include <algorithm>
#include <pcl/console/print.h>
+#include <Eigen/LU> // for inverse
+
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
pcl::BilateralUpsampling<PointInT, PointOutT>::process (pcl::PointCloud<PointOutT> &output)
if (!initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
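Several hunks below make the same swap from `output.points.clear ()` to `output.clear ()`. Calling clear() on the cloud itself empties the storage and resets width and height in one step, so the remaining manual `width = height = 0` assignments become redundant (though harmless). A minimal sketch of the difference, assuming standard PCL headers:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ> cloud;
  cloud.width = 10;
  cloud.height = 1;
  cloud.points.resize (10);

  cloud.points.clear ();   // old style: storage emptied, width/height left stale
  cloud.clear ();          // new style: storage emptied and width/height reset to 0
  return static_cast<int> (cloud.width + cloud.height + cloud.size ());  // 0
}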
if (alpha_ <= 0)
{
PCL_ERROR ("[pcl::%s::reconstruct] Alpha parameter must be set to a positive number!\n", getClassName ().c_str ());
- output.points.clear ();
+ output.clear ();
return;
}
if (!initCompute ())
{
- output.points.clear ();
+ output.clear ();
return;
}
if (alpha_ <= 0)
{
PCL_ERROR ("[pcl::%s::reconstruct] Alpha parameter must be set to a positive number!\n", getClassName ().c_str ());
- output.points.clear ();
+ output.clear ();
return;
}
if (!initCompute ())
{
- output.points.clear ();
+ output.clear ();
return;
}
points[i * dim_ + 2] = static_cast<coordT> (cloud_transformed[i].z);
}
+ qhT qh_qh;
+ qhT* qh = &qh_qh;
+ QHULL_LIB_CHECK
+ qh_zero(qh, errfile);
+
// Compute concave hull
- exitcode = qh_new_qhull (dim_, static_cast<int> (cloud_transformed.size ()), points, ismalloc, flags, outfile, errfile);
+ exitcode = qh_new_qhull (qh, dim_, static_cast<int> (cloud_transformed.size ()), points, ismalloc, flags, outfile, errfile);
if (exitcode != 0)
{
PCL_ERROR ("[pcl::%s::performReconstruction] ERROR: point cloud contains NaN values, consider running pcl::PassThrough filter first to remove NaNs!\n", getClassName ().c_str ());
}
- alpha_shape.points.resize (0);
+ alpha_shape.resize (0);
alpha_shape.width = alpha_shape.height = 0;
polygons.resize (0);
- qh_freeqhull (!qh_ALL);
+ qh_freeqhull (qh, !qh_ALL);
int curlong, totlong;
- qh_memfreeshort (&curlong, &totlong);
+ qh_memfreeshort (qh, &curlong, &totlong);
return;
}
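The hunks above and below migrate the hull code from the legacy global-state Qhull interface to the reentrant one (libqhull_r): the solver state lives in a caller-owned qhT object and every qh_* call receives it explicitly, instead of going through the `qh` macro that expanded to a global. A hedged sketch of the lifecycle this patch follows (error handling elided; the include path and the runQhull wrapper are illustrative assumptions, not part of the patch):

extern "C" {
#include <libqhull_r/libqhull_r.h>   // reentrant Qhull interface (assumed install layout)
}
#include <cstdio>

int runQhull (int dim, int num_points, coordT* points, const char* flags)
{
  qhT qh_qh;                  // caller-owned solver state (replaces the global)
  qhT* qh = &qh_qh;
  QHULL_LIB_CHECK             // verifies header/library binary compatibility
  qh_zero (qh, stderr);       // initialize the state before the first real call

  int exitcode = qh_new_qhull (qh, dim, num_points, points, /*ismalloc=*/False,
                               const_cast<char*> (flags), nullptr, stderr);
  // ... on success, walk qh->num_vertices / qh->num_facets, FORALLvertices, ...

  qh_freeqhull (qh, !qh_ALL);                 // release the hull structures
  int curlong, totlong;
  qh_memfreeshort (qh, &curlong, &totlong);   // release the short-term memory pool
  return exitcode;
}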
- qh_setvoronoi_all ();
+ qh_setvoronoi_all (qh);
- int num_vertices = qh num_vertices;
- alpha_shape.points.resize (num_vertices);
+ int num_vertices = qh->num_vertices;
+ alpha_shape.resize (num_vertices);
vertexT *vertex;
// Max vertex id
++max_vertex_id;
std::vector<int> qhid_to_pcidx (max_vertex_id);
- int num_facets = qh num_facets;
+ int num_facets = qh->num_facets;
if (dim_ == 3)
{
- setT *triangles_set = qh_settemp (4 * num_facets);
+ setT *triangles_set = qh_settemp (qh, 4 * num_facets);
if (voronoi_centers_)
voronoi_centers_->points.resize (num_facets);
if (r <= alpha_)
{
// all triangles in tetrahedron are good, add them all to the alpha shape (triangles_set)
- qh_makeridges (facet);
+ qh_makeridges (qh, facet);
facet->good = true;
- facet->visitid = qh visit_id;
+ facet->visitid = qh->visit_id;
ridgeT *ridge, **ridgep;
FOREACHridge_ (facet->ridges)
{
facetT *neighb = otherfacet_ (ridge, facet);
- if ((neighb->visitid != qh visit_id))
- qh_setappend (&triangles_set, ridge);
+ if ((neighb->visitid != qh->visit_id))
+ qh_setappend (qh, &triangles_set, ridge);
}
}
else
{
// consider individual triangles from the tetrahedron...
facet->good = false;
- facet->visitid = qh visit_id;
- qh_makeridges (facet);
+ facet->visitid = qh->visit_id;
+ qh_makeridges (qh, facet);
ridgeT *ridge, **ridgep;
FOREACHridge_ (facet->ridges)
{
facetT *neighb;
neighb = otherfacet_ (ridge, facet);
- if ((neighb->visitid != qh visit_id))
+ if ((neighb->visitid != qh->visit_id))
{
// check if individual triangle is good and add it to triangles_set
double r = pcl::getCircumcircleRadius (a, b, c);
if (r <= alpha_)
- qh_setappend (&triangles_set, ridge);
+ qh_setappend (qh, &triangles_set, ridge);
}
}
}
{
polygons[triangles].vertices.resize (3);
int vertex_n, vertex_i;
- FOREACHvertex_i_ ((*ridge).vertices) //3 vertices per ridge!
+ FOREACHvertex_i_ (qh, (*ridge).vertices) //3 vertices per ridge!
{
if (!added_vertices[vertex->id])
{
}
}
- alpha_shape.points.resize (vertices);
+ alpha_shape.resize (vertices);
alpha_shape.width = alpha_shape.size ();
alpha_shape.height = 1;
}
{
// Compute the alpha complex for the set of points
// Filters the Delaunay triangles
- setT *edges_set = qh_settemp (3 * num_facets);
+ setT *edges_set = qh_settemp (qh, 3 * num_facets);
if (voronoi_centers_)
voronoi_centers_->points.resize (num_facets);
if (r <= alpha_)
{
pcl::Vertices facet_vertices; //TODO: is not used!!
- qh_makeridges (facet);
+ qh_makeridges (qh, facet);
facet->good = true;
ridgeT *ridge, **ridgep;
FOREACHridge_ (facet->ridges)
- qh_setappend (&edges_set, ridge);
+ qh_setappend (qh, &edges_set, ridge);
if (voronoi_centers_)
{
std::vector<int> pcd_indices;
pcd_indices.resize (2);
- FOREACHvertex_i_ ((*ridge).vertices) //in 2-dim, 2 vertices per ridge!
+ FOREACHvertex_i_ (qh, (*ridge).vertices) //in 2-dim, 2 vertices per ridge!
{
if (!added_vertices[vertex->id])
{
}
}
- alpha_shape.points.resize (vertices);
+ alpha_shape.resize (vertices);
PointCloud alpha_shape_sorted;
- alpha_shape_sorted.points.resize (vertices);
+ alpha_shape_sorted.resize (vertices);
// iterate over edges until they are empty!
std::map<int, std::vector<int> >::iterator curr = edges.begin ();
{
alpha_shape_sorted[sorted_idx] = alpha_shape[(*curr).first];
// check where we can go from (*curr).first
- for (const int &i : (*curr).second)
+ for (const auto &i : (*curr).second)
{
if (!used[i])
{
voronoi_centers_->points.resize (dd);
}
- qh_freeqhull (!qh_ALL);
+ qh_freeqhull (qh, !qh_ALL);
int curlong, totlong;
- qh_memfreeshort (&curlong, &totlong);
+ qh_memfreeshort (qh, &curlong, &totlong);
Eigen::Affine3d transInverse = transform1.inverse ();
pcl::transformPointCloud (alpha_shape, alpha_shape, transInverse);
pcl::KdTreeFLANN<PointInT> tree (true);
tree.setInputCloud (input_, indices_);
- std::vector<int> neighbor;
+ pcl::Indices neighbor;
std::vector<float> distances;
neighbor.resize (1);
distances.resize (1);
}
pcl::PointCloud<PointInT> normal_calc_cloud;
- normal_calc_cloud.points.resize (3);
+ normal_calc_cloud.resize (3);
normal_calc_cloud[0] = p0;
normal_calc_cloud[1] = p1;
normal_calc_cloud[2] = p2;
// output from qh_produce_output(), use NULL to skip qh_produce_output()
FILE *outfile = nullptr;
-#ifndef HAVE_QHULL_2011
if (compute_area_)
outfile = stderr;
-#endif
// option flags for qhull, see qh_opt.htm
const char* flags = qhull_flags.c_str ();
// This should only happen if we had invalid input
PCL_ERROR ("[pcl::%s::performReconstruction2D] Invalid input!\n", getClassName ().c_str ());
}
+
+ qhT qh_qh;
+ qhT* qh = &qh_qh;
+ QHULL_LIB_CHECK
+ qh_zero(qh, errfile);
// Compute convex hull
- int exitcode = qh_new_qhull (dimension, static_cast<int> (indices_->size ()), points, ismalloc, const_cast<char*> (flags), outfile, errfile);
-#ifdef HAVE_QHULL_2011
+ int exitcode = qh_new_qhull (qh, dimension, static_cast<int> (indices_->size ()), points, ismalloc, const_cast<char*> (flags), outfile, errfile);
if (compute_area_)
{
- qh_prepare_output();
+ qh_prepare_output(qh);
}
-#endif
// 0 if no error from qhull or it doesn't find any vertices
- if (exitcode != 0 || qh num_vertices == 0)
+ if (exitcode != 0 || qh->num_vertices == 0)
{
PCL_ERROR ("[pcl::%s::performReconstrution2D] ERROR: qhull was unable to compute a convex hull for the given point cloud (%lu)!\n", getClassName ().c_str (), indices_->size ());
- hull.points.resize (0);
+ hull.resize (0);
hull.width = hull.height = 0;
polygons.resize (0);
- qh_freeqhull (!qh_ALL);
+ qh_freeqhull (qh, !qh_ALL);
int curlong, totlong;
- qh_memfreeshort (&curlong, &totlong);
+ qh_memfreeshort (qh, &curlong, &totlong);
return;
}
// Qhull returns the area in volume for 2D
if (compute_area_)
{
- total_area_ = qh totvol;
+ total_area_ = qh->totvol;
total_volume_ = 0.0;
}
- int num_vertices = qh num_vertices;
- hull.points.resize (num_vertices);
- memset (&hull.points[0], hull.size (), sizeof (PointInT));
+ int num_vertices = qh->num_vertices;
+
+ hull.clear();
+ hull.resize(num_vertices, PointInT{});
vertexT * vertex;
int i = 0;
- std::vector<std::pair<int, Eigen::Vector4f>, Eigen::aligned_allocator<std::pair<int, Eigen::Vector4f> > > idx_points (num_vertices);
- idx_points.resize (hull.size ());
- memset (&idx_points[0], hull.size (), sizeof (std::pair<int, Eigen::Vector4f>));
+ AlignedVector<std::pair<int, Eigen::Vector4f>> idx_points (num_vertices);
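The two removed memset calls here were incorrect uses of the API: memset takes the fill byte as its second argument and the byte count as its third, so passing `hull.size ()` as the value and `sizeof (PointInT)` as the count never zero-initialized the intended range. Value-initializing through the container itself is the well-defined replacement; a minimal hedged sketch (generic types, not PCL API):

#include <vector>

struct Sample { int id = 0; float weight = 0.0f; };

int main ()
{
  std::vector<Sample> samples;
  samples.resize (8, Sample{});   // 8 value-initialized elements, no raw memset needed
  return samples[0].id;           // 0
}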
FORALLvertices
{
- hull[i] = (*input_)[(*indices_)[qh_pointid (vertex->point)]];
- idx_points[i].first = qh_pointid (vertex->point);
+ hull[i] = (*input_)[(*indices_)[qh_pointid (qh, vertex->point)]];
+ idx_points[i].first = qh_pointid (qh, vertex->point);
++i;
}
polygons[0].vertices[j] = static_cast<unsigned int> (j);
}
- qh_freeqhull (!qh_ALL);
+ qh_freeqhull (qh, !qh_ALL);
int curlong, totlong;
- qh_memfreeshort (&curlong, &totlong);
+ qh_memfreeshort (qh, &curlong, &totlong);
hull.width = hull.size ();
hull.height = 1;
// output from qh_produce_output(), use NULL to skip qh_produce_output()
FILE *outfile = nullptr;
-#ifndef HAVE_QHULL_2011
if (compute_area_)
outfile = stderr;
-#endif
// option flags for qhull, see qh_opt.htm
const char *flags = qhull_flags.c_str ();
points[j + 2] = static_cast<coordT> ((*input_)[(*indices_)[i]].z);
}
+ qhT qh_qh;
+ qhT* qh = &qh_qh;
+ QHULL_LIB_CHECK
+ qh_zero(qh, errfile);
+
// Compute convex hull
- int exitcode = qh_new_qhull (dimension, static_cast<int> (indices_->size ()), points, ismalloc, const_cast<char*> (flags), outfile, errfile);
-#ifdef HAVE_QHULL_2011
+ int exitcode = qh_new_qhull (qh, dimension, static_cast<int> (indices_->size ()), points, ismalloc, const_cast<char*> (flags), outfile, errfile);
if (compute_area_)
{
- qh_prepare_output();
+ qh_prepare_output(qh);
}
-#endif
// 0 if no error from qhull
if (exitcode != 0)
getClassName().c_str(),
static_cast<std::size_t>(input_->size()));
- hull.points.resize (0);
+ hull.resize (0);
hull.width = hull.height = 0;
polygons.resize (0);
- qh_freeqhull (!qh_ALL);
+ qh_freeqhull (qh, !qh_ALL);
int curlong, totlong;
- qh_memfreeshort (&curlong, &totlong);
+ qh_memfreeshort (qh, &curlong, &totlong);
return;
}
- qh_triangulate ();
+ qh_triangulate (qh);
- int num_facets = qh num_facets;
+ int num_facets = qh->num_facets;
- int num_vertices = qh num_vertices;
- hull.points.resize (num_vertices);
+ int num_vertices = qh->num_vertices;
+ hull.resize (num_vertices);
vertexT * vertex;
int i = 0;
FORALLvertices
{
// Add vertices to hull point_cloud and store index
- hull_indices_.indices.push_back ((*indices_)[qh_pointid (vertex->point)]);
+ hull_indices_.indices.push_back ((*indices_)[qh_pointid (qh, vertex->point)]);
hull[i] = (*input_)[hull_indices_.indices.back ()];
qhid_to_pcidx[vertex->id] = i; // map the vertex id of qhull to the point cloud index
if (compute_area_)
{
- total_area_ = qh totarea;
- total_volume_ = qh totvol;
+ total_area_ = qh->totarea;
+ total_volume_ = qh->totvol;
}
if (fill_polygon_data)
// Needed by FOREACHvertex_i_
int vertex_n, vertex_i;
- FOREACHvertex_i_ ((*facet).vertices)
+ FOREACHvertex_i_ (qh, (*facet).vertices)
//facet_vertices.vertices.push_back (qhid_to_pcidx[vertex->id]);
polygons[dd].vertices[vertex_i] = qhid_to_pcidx[vertex->id];
++dd;
}
}
// Deallocates memory (also the points)
- qh_freeqhull (!qh_ALL);
+ qh_freeqhull (qh, !qh_ALL);
int curlong, totlong;
- qh_memfreeshort (&curlong, &totlong);
+ qh_memfreeshort (qh, &curlong, &totlong);
hull.width = hull.size ();
hull.height = 1;
points.header = input_->header;
if (!initCompute () || input_->points.empty () || indices_->empty ())
{
- points.points.clear ();
+ points.clear ();
return;
}
points.header = input_->header;
if (!initCompute () || input_->points.empty () || indices_->empty ())
{
- points.points.clear ();
+ points.clear ();
return;
}
nnn_ = static_cast<int> (indices_->size ());
// Variables to hold the results of nearest neighbor searches
- std::vector<int> nnIdx (nnn_);
+ pcl::Indices nnIdx (nnn_);
std::vector<float> sqrDists (nnn_);
// current number of connected components
if (!input_->is_dense)
{
// Skip invalid points from the indices list
- for (std::vector<int>::const_iterator it = indices_->begin (); it != indices_->end (); ++it)
- if (!std::isfinite ((*input_)[*it].x) ||
- !std::isfinite ((*input_)[*it].y) ||
- !std::isfinite ((*input_)[*it].z))
- state_[*it] = NONE;
+ for (const auto& idx : (*indices_))
+ if (!std::isfinite ((*input_)[idx].x) ||
+ !std::isfinite ((*input_)[idx].y) ||
+ !std::isfinite ((*input_)[idx].z))
+ state_[idx] = NONE;
}
// Saving coordinates and point to index mapping
angles_[i].index = nnIdx[i];
if (
(state_[nnIdx[i]] == COMPLETED) || (state_[nnIdx[i]] == BOUNDARY)
- || (state_[nnIdx[i]] == NONE) || (nnIdx[i] == -1) /// NOTE: discarding NaN points and those that are not in indices_
+ || (state_[nnIdx[i]] == NONE) || (nnIdx[i] == UNAVAILABLE) /// NOTE: discarding NaN points and those that are not in indices_
|| (sqrDists[i] > sqr_dist_threshold)
)
angles_[i].visible = false;
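The literal -1 sentinel is replaced with UNAVAILABLE here because pcl::index_t is configurable and may be unsigned, in which case comparing against -1 relies on an implicit conversion. A brief hedged sketch, assuming pcl::UNAVAILABLE from pcl/types.h as in recent PCL:

#include <pcl/types.h>   // pcl::index_t and (in recent PCL) pcl::UNAVAILABLE

bool isValidNeighbor (pcl::index_t idx)
{
  // UNAVAILABLE marks "no neighbor found"; valid for signed and unsigned index_t alike.
  return idx != pcl::UNAVAILABLE;
}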
angles_[i].nnIndex = i;
if (
(state_[nnIdx[i]] == COMPLETED) || (state_[nnIdx[i]] == BOUNDARY)
- || (state_[nnIdx[i]] == NONE) || (nnIdx[i] == -1) /// NOTE: discarding NaN points and those that are not in indices_
+ || (state_[nnIdx[i]] == NONE) || (nnIdx[i] == UNAVAILABLE) /// NOTE: discarding NaN points and those that are not in indices_
|| (sqrDists[i] > sqr_dist_threshold)
)
angles_[i].visible = false;
{
if (doubleEdges[j].index != i)
{
- int f = ffn_[nnIdx[doubleEdges[j].index]];
+ const auto& f = ffn_[nnIdx[doubleEdges[j].index]];
if ((f != nnIdx[i]) && (f != R_))
visibility = isVisible(uvn_nn[i], uvn_nn[doubleEdges[j].index], doubleEdges[j].first, Eigen::Vector2f::Zero());
if (!visibility)
break;
- int s = sfn_[nnIdx[doubleEdges[j].index]];
+ const auto& s = sfn_[nnIdx[doubleEdges[j].index]];
if ((s != nnIdx[i]) && (s != R_))
visibility = isVisible(uvn_nn[i], uvn_nn[doubleEdges[j].index], doubleEdges[j].second, Eigen::Vector2f::Zero());
if (!visibility)
else
angle_so_far = 0;
}
- for (const int &it : to_erase)
+ for (const auto &idx : to_erase)
{
for (std::vector<int>::iterator iter = angleIdx.begin(); iter != angleIdx.end(); ++iter)
- if (it == *iter)
+ if (idx == *iter)
{
angleIdx.erase(iter);
break;
template <typename PointInT> void
pcl::GreedyProjectionTriangulation<PointInT>::connectPoint (
std::vector<pcl::Vertices> &polygons,
- const int prev_index, const int next_index, const int next_next_index,
+ const pcl::index_t prev_index, const pcl::index_t next_index, const pcl::index_t next_next_index,
const Eigen::Vector2f &uvn_current,
const Eigen::Vector2f &uvn_prev,
const Eigen::Vector2f &uvn_next)
case 2://next2f:
{
addTriangle (current_index_, ffn_[current_index_], next_index, polygons);
- int neighbor_update = next_index;
+ auto neighbor_update = next_index;
/* updating next_index */
if (state_[next_index] <= FREE)
case 3://next2s:
{
addTriangle (current_index_, sfn_[current_index_], next_index, polygons);
- int neighbor_update = next_index;
+ auto neighbor_update = next_index;
/* updating next_index */
if (state_[next_index] <= FREE)
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> void
pcl::GridProjection<PointNT>::getDataPtsUnion (const Eigen::Vector3i &index,
- std::vector <int> &pt_union_indices)
+ pcl::Indices &pt_union_indices)
{
for (int i = index[0] - padding_size_; i <= index[0] + padding_size_; ++i)
{
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> void
pcl::GridProjection<PointNT>::createSurfaceForCell (const Eigen::Vector3i &index,
- std::vector <int> &pt_union_indices)
+ pcl::Indices &pt_union_indices)
{
// 8 vertices of the cell
std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > vertices (8);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> void
pcl::GridProjection<PointNT>::getProjection (const Eigen::Vector4f &p,
- std::vector <int> &pt_union_indices, Eigen::Vector4f &projection)
+ pcl::Indices &pt_union_indices, Eigen::Vector4f &projection)
{
const double projection_distance = leaf_size_ * 3;
std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > end_pt (2);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> void
pcl::GridProjection<PointNT>::getProjectionWithPlaneFit (const Eigen::Vector4f &p,
- std::vector<int> (&pt_union_indices),
+ pcl::Indices (&pt_union_indices),
Eigen::Vector4f &projection)
{
// Compute the plane coefficients
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> void
pcl::GridProjection<PointNT>::getVectorAtPoint (const Eigen::Vector4f &p,
- std::vector <int> &pt_union_indices,
+ pcl::Indices &pt_union_indices,
Eigen::Vector3f &vo)
{
std::vector <double> pt_union_dist (pt_union_indices.size ());
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> void
pcl::GridProjection<PointNT>::getVectorAtPointKNN (const Eigen::Vector4f &p,
- std::vector <int> &k_indices,
+ pcl::Indices &k_indices,
std::vector <float> &k_squared_distances,
Eigen::Vector3f &vo)
{
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> double
pcl::GridProjection<PointNT>::getMagAtPoint (const Eigen::Vector4f &p,
- const std::vector <int> &pt_union_indices)
+ const pcl::Indices &pt_union_indices)
{
std::vector <double> pt_union_dist (pt_union_indices.size ());
- std::vector <double> pt_union_weight (pt_union_indices.size ());
double sum = 0.0;
for (std::size_t i = 0; i < pt_union_indices.size (); ++i)
{
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> double
pcl::GridProjection<PointNT>::getD1AtPoint (const Eigen::Vector4f &p, const Eigen::Vector3f &vec,
- const std::vector <int> &pt_union_indices)
+ const pcl::Indices &pt_union_indices)
{
double sz = 0.01 * leaf_size_;
Eigen::Vector3f v = vec * static_cast<float> (sz);
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> double
pcl::GridProjection<PointNT>::getD2AtPoint (const Eigen::Vector4f &p, const Eigen::Vector3f &vec,
- const std::vector <int> &pt_union_indices)
+ const pcl::Indices &pt_union_indices)
{
double sz = 0.01 * leaf_size_;
Eigen::Vector3f v = vec * static_cast<float> (sz);
template <typename PointNT> bool
pcl::GridProjection<PointNT>::isIntersected (const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > &end_pts,
std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &vect_at_end_pts,
- std::vector <int> &pt_union_indices)
+ pcl::Indices &pt_union_indices)
{
assert (end_pts.size () == 2);
assert (vect_at_end_pts.size () == 2);
const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > &end_pts,
const std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &vect_at_end_pts,
const Eigen::Vector4f &start_pt,
- std::vector <int> &pt_union_indices,
+ pcl::Indices &pt_union_indices,
Eigen::Vector4f &intersection)
{
assert (end_pts.size () == 2);
template <typename PointNT> void
pcl::GridProjection<PointNT>::storeVectAndSurfacePoint (int index_1d,
const Eigen::Vector3i &,
- std::vector <int> &pt_union_indices,
+ pcl::Indices &pt_union_indices,
const Leaf &cell_data)
{
// Get point on grid
cell_center.y () + static_cast<float> (leaf_size_) / 2.0f,
cell_center.z () + static_cast<float> (leaf_size_) / 2.0f, 0.0f);
- std::vector <int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (k_);
std::vector <float> k_squared_distances;
k_squared_distances.resize (k_);
cell_hash_map_.rehash (data_->size () / static_cast<long unsigned int> (cell_hash_map_.max_load_factor ()));
// Go over all points and insert them into the right leaf
- for (int cp = 0; cp < static_cast<int> (data_->size ()); ++cp)
+ for (pcl::index_t cp = 0; cp < static_cast<pcl::index_t> (data_->size ()); ++cp)
{
// Check if the point is invalid
if (!std::isfinite ((*data_)[cp].x) ||
for (const auto &entry : cell_hash_map_)
{
getIndexIn3D (entry.first, index);
- std::vector <int> pt_union_indices;
+ pcl::Indices pt_union_indices;
getDataPtsUnion (index, pt_union_indices);
// Needs at least 10 points (?)
for (const auto &entry : cell_hash_map_)
{
getIndexIn3D (entry.first, index);
- std::vector <int> pt_union_indices;
+ pcl::Indices pt_union_indices;
getDataPtsUnion (index, pt_union_indices);
// Needs at least 10 points (?)
cloud.height = 1;
cloud.is_dense = true;
- cloud.points.resize (surface_.size ());
+ cloud.resize (surface_.size ());
// Copy the data from surface_ to cloud
for (std::size_t i = 0; i < cloud.size (); ++i)
{
{
PCL_ERROR ("[pcl::%s::performReconstruction] Invalid iso level %f! Please use a number between 0 and 1.\n",
getClassName ().c_str (), iso_level_);
- points.width = points.height = 0;
- points.points.clear ();
+ points.clear ();
polygons.clear ();
return;
}
#define PCL_SURFACE_IMPL_MARCHING_CUBES_HOPPE_H_
#include <pcl/surface/marching_cubes_hoppe.h>
-#include <pcl/common/common.h>
-#include <pcl/common/vector_average.h>
-#include <pcl/Vertices.h>
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT>
for (int z = 0; z < res_z_; ++z)
{
- std::vector<int> nn_indices (1, 0);
+ pcl::Indices nn_indices (1, 0);
std::vector<float> nn_sqr_dists (1, 0.0f);
const Eigen::Vector3f point = (lower_boundary_ + size_voxel_ * Eigen::Array3f (x, y, z)).matrix ();
PointNT p;
#define PCL_SURFACE_IMPL_MARCHING_CUBES_RBF_H_
#include <pcl/surface/marching_cubes_rbf.h>
-#include <pcl/common/common.h>
-#include <pcl/common/vector_average.h>
-#include <pcl/Vertices.h>
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT>
#include <pcl/type_traits.h>
#include <pcl/surface/mls.h>
-#include <pcl/common/io.h>
#include <pcl/common/common.h> // for getMinMax3D
#include <pcl/common/copy_point.h>
#include <pcl/common/centroid.h>
#include <pcl/common/eigen.h>
-#include <pcl/common/geometry.h>
#include <pcl/search/kdtree.h> // for KdTree
#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <Eigen/Geometry> // for cross
+#include <Eigen/LU> // for inverse
+
#ifdef _OPENMP
#include <omp.h>
#endif
// Copy the header
output.header = input_->header;
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
if (search_radius_ <= 0 || sqr_gauss_param_ <= 0)
{
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
-pcl::MovingLeastSquares<PointInT, PointOutT>::computeMLSPointNormal (int index,
- const std::vector<int> &nn_indices,
+pcl::MovingLeastSquares<PointInT, PointOutT>::computeMLSPointNormal (pcl::index_t index,
+ const pcl::Indices &nn_indices,
PointCloudOut &projected_points,
NormalCloud &projected_points_normals,
PointIndices &corresponding_input_indices,
}
template <typename PointInT, typename PointOutT> void
-pcl::MovingLeastSquares<PointInT, PointOutT>::addProjectedPointNormal (int index,
+pcl::MovingLeastSquares<PointInT, PointOutT>::addProjectedPointNormal (pcl::index_t index,
const Eigen::Vector3d &point,
const Eigen::Vector3d &normal,
double curvature,
{
// Allocate enough space to hold the results of nearest neighbor searches
// \note resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_sqr_dists;
// Get the initial estimates of point positions and their neighborhoods
// Get 3D position of point
//Eigen::Vector3f pos = (*distinct_cloud_)[dp_i].getVector3fMap ();
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->nearestKSearch ((*distinct_cloud_)[dp_i], 1, nn_indices, nn_dists);
- int input_index = nn_indices.front ();
+ const auto input_index = nn_indices.front ();
// If the closest point did not have a valid MLS fitting result
// OR if it is too far away from the sampled point
p.y = pos[1];
p.z = pos[2];
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree_->nearestKSearch (p, 1, nn_indices, nn_dists);
- int input_index = nn_indices.front ();
+ const auto input_index = nn_indices.front ();
// If the closest point did not have a valid MLS fitting result
// OR if it is too far away from the sampled point
}
Eigen::Vector2f
-pcl::MLSResult::calculatePrincipleCurvatures (const double u, const double v) const
+pcl::MLSResult::calculatePrincipalCurvatures (const double u, const double v) const
{
Eigen::Vector2f k (1e-5, 1e-5);
// Note: this uses the Monge patch to derive the Gaussian curvature and mean curvature, as described at http://mathworld.wolfram.com/MongePatch.html
// Then:
// k1 = H + sqrt(H^2 - K)
- // k1 = H - sqrt(H^2 - K)
+ // k2 = H - sqrt(H^2 - K)
if (order > 1 && c_vec.size () >= (order + 1) * (order + 2) / 2 && std::isfinite (c_vec[0]))
{
const PolynomialPartialDerivative d = getPolynomialPartialDerivative (u, v);
}
else
{
- PCL_ERROR ("No Polynomial fit data, unable to calculate the principle curvatures!\n");
+ PCL_ERROR ("No Polynomial fit data, unable to calculate the principal curvatures!\n");
}
return (k);
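For reference, the relation used in the renamed calculatePrincipalCurvatures above: with Gaussian curvature K and mean curvature H of the fitted Monge patch, the principal curvatures are the two roots of k^2 - 2*H*k + K = 0. A small hedged sketch of that arithmetic (plain C++, not PCL API):

#include <algorithm>
#include <cmath>
#include <utility>

std::pair<double, double> principalCurvatures (double K, double H)
{
  // Clamp the discriminant so round-off in H*H - K cannot produce a NaN.
  const double disc = std::sqrt (std::max (0.0, H * H - K));
  return { H + disc, H - disc };   // k1 >= k2
}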
template <typename PointT> void
pcl::MLSResult::computeMLSSurface (const pcl::PointCloud<PointT> &cloud,
- int index,
- const std::vector<int> &nn_indices,
+ pcl::index_t index,
+ const pcl::Indices &nn_indices,
double search_radius,
int polynomial_order,
std::function<double(const double)> weight_func)
}
// Go through neighbors, transform them in the local coordinate system,
- // save height and the evaluation of the polynome's terms
+ // save height and the evaluation of the polynomial's terms
for (std::size_t ni = 0; ni < static_cast<std::size_t>(num_neighbors); ++ni)
{
// Transforming coordinates
#define PCL_SURFACE_ORGANIZED_FAST_MESH_HPP_
#include <pcl/surface/organized_fast_mesh.h>
+#include <pcl/common/io.h> // for getFieldIndex
/////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT> void
, show_residual_ (false)
, min_iterations_ (8)
, solver_accuracy_ (1e-3f)
+ , threads_(1)
{
}
{
}
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointNT> void
+pcl::Poisson<PointNT>::setThreads (int threads)
+{
+ if (threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = threads;
+}
+
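With the new setThreads overload wired into the octree (see `tree.threads = threads_` below), callers can opt into OpenMP-parallel Poisson reconstruction. A minimal hedged usage sketch (cloud preparation omitted; the reconstruct wrapper and cloud_with_normals are placeholders):

#include <pcl/PolygonMesh.h>
#include <pcl/point_types.h>
#include <pcl/surface/poisson.h>

void reconstruct (const pcl::PointCloud<pcl::PointNormal>::ConstPtr& cloud_with_normals)
{
  pcl::Poisson<pcl::PointNormal> poisson;
  poisson.setInputCloud (cloud_with_normals);
  poisson.setThreads (0);        // 0 = use all available processors, per the code above
  pcl::PolygonMesh mesh;
  poisson.reconstruct (mesh);
}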
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> template <int Degree> void
pcl::Poisson<PointNT>::execute (poisson::CoredVectorMeshData &mesh,
poisson::TreeNodeData::UseIndex = 1;
poisson::Octree<Degree> tree;
- /// TODO OPENMP stuff
- // tree.threads = Threads.value;
+
+ tree.threads = threads_;
center.coords[0] = center.coords[1] = center.coords[2] = 0;
// Write output PolygonMesh
pcl::PointCloud<pcl::PointXYZ> cloud;
- cloud.points.resize (int (mesh.outOfCorePointCount () + mesh.inCorePoints.size ()));
+ cloud.resize (int (mesh.outOfCorePointCount () + mesh.inCorePoints.size ()));
poisson::Point3D<float> p;
for (int i = 0; i < int (mesh.inCorePoints.size ()); i++)
{
if (!initCompute ())
{
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
#ifndef PCL_SURFACE_RECONSTRUCTION_IMPL_H_
#define PCL_SURFACE_RECONSTRUCTION_IMPL_H_
-#include <pcl/search/pcl_search.h>
+#include <pcl/conversions.h> // for toPCLPointCloud2
+#include <pcl/search/kdtree.h> // for KdTree
+#include <pcl/search/organized.h> // for OrganizedNeighbor
namespace pcl
#ifndef PCL_SURFACE_IMPL_SURFEL_SMOOTHING_H_
#define PCL_SURFACE_IMPL_SURFEL_SMOOTHING_H_
+#include <pcl/search/organized.h> // for OrganizedNeighbor
+#include <pcl/search/kdtree.h> // for KdTree
#include <pcl/surface/surfel_smoothing.h>
#include <pcl/common/distances.h>
+#include <pcl/console/print.h> // for PCL_ERROR, PCL_DEBUG
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
output_normals = NormalCloudPtr (new NormalCloud);
output_normals->points.resize (interm_cloud_->size ());
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
std::vector<float> diffs (interm_cloud_->size ());
float error_residual = error_residual_threshold_ + 1;
float last_error_residual = error_residual + 100.0f;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
int big_iterations = 0;
diffs[i] = (*cloud2_normals)[i].getNormalVector4fMap ().dot ((*cloud2)[i].getVector4fMap () -
(*interm_cloud_)[i].getVector4fMap ());
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_distances;
output_features->resize (cloud2->size ());
bool largest = true;
bool smallest = true;
- for (const int &nn_index : nn_indices)
+ for (const auto &nn_index : nn_indices)
{
if (diffs[point_i] < diffs[nn_index])
largest = false;
}// end faces
// texture materials
- std::stringstream tex_name;
- tex_name << "material_" << m;
- tex_name >> tex_material_.tex_name;
+ tex_material_.tex_name = "material_" + std::to_string(m);
tex_material_.tex_file = tex_files_[m];
tex_mesh.tex_materials.push_back (tex_material_);
}// end faces
// texture materials
- std::stringstream tex_name;
- tex_name << "material_" << m;
- tex_name >> tex_material_.tex_name;
+ tex_material_.tex_name = "material_" + std::to_string(m);
tex_material_.tex_file = tex_files_[m];
tex_mesh.tex_materials.push_back (tex_material_);
{
Eigen::Vector2f tmp_VT;
// for each point of this face
- for (const unsigned int &vertex : tex_polygon.vertices)
+ for (const auto &vertex : tex_polygon.vertices)
{
// get point
PointInT pt = (*camera_transformed_cloud)[vertex];
}// end faces
// texture materials
- std::stringstream tex_name;
- tex_name << "material_" << m;
- tex_name >> tex_material_.tex_name;
+ tex_material_.tex_name = "material_" + std::to_string(m);
tex_material_.tex_file = current_cam.texture_file;
tex_mesh.tex_materials.push_back (tex_material_);
direction (1) = pt.y;
direction (2) = pt.z;
- std::vector<int> indices;
+ pcl::Indices indices;
PointCloudConstPtr cloud (new PointCloud());
cloud = octree->getInputCloud();
octree->getIntersectedVoxelIndices(direction, -direction, indices);
int nbocc = static_cast<int> (indices.size ());
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// if intersected point is on the other side of the camera
if (pt.z * (*cloud)[index].z < 0)
template<typename PointInT> void
pcl::TextureMapping<PointInT>::removeOccludedPoints (const PointCloudPtr &input_cloud,
PointCloudPtr &filtered_cloud,
- const double octree_voxel_size, std::vector<int> &visible_indices,
- std::vector<int> &occluded_indices)
+ const double octree_voxel_size, pcl::Indices &visible_indices,
+ pcl::Indices &occluded_indices)
{
// variable used to filter occluded points by depth
double maxDeltaZ = octree_voxel_size;
// for each point of the cloud, raycast toward camera and check intersected voxels.
Eigen::Vector3f direction;
- std::vector<int> indices;
+ pcl::Indices indices;
for (std::size_t i = 0; i < input_cloud->size (); ++i)
{
direction (0) = (*input_cloud)[i].x;
octree.getIntersectedVoxelIndices (direction, -direction, indices);
int nbocc = static_cast<int> (indices.size ());
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// if intersected point is on the other side of the camera
if ((*input_cloud)[i].z * (*input_cloud)[index].z < 0)
{
// point is added in the filtered mesh
filtered_cloud->points.push_back ((*input_cloud)[i]);
- visible_indices.push_back (static_cast<int> (i));
+ visible_indices.push_back (static_cast<pcl::index_t> (i));
}
else
{
- occluded_indices.push_back (static_cast<int> (i));
+ occluded_indices.push_back (static_cast<pcl::index_t> (i));
}
}
// load points into a PCL format
pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);
- std::vector<int> visible, occluded;
+ pcl::Indices visible, occluded;
removeOccludedPoints (cloud, filtered_cloud, octree_voxel_size, visible, occluded);
// Now that we know which points are visible, let's iterate over each face.
{
// check if all the face's points are visible
bool faceIsVisible = true;
- std::vector<int>::iterator it;
// iterate over face's vertex
- for (const unsigned int &vertex : tex_mesh.tex_polygons[polygons][faces].vertices)
+ for (const auto &vertex : tex_mesh.tex_polygons[polygons][faces].vertices)
{
- it = find (occluded.begin (), occluded.end (), vertex);
-
- if (it == occluded.end ())
+ if (find (occluded.begin (), occluded.end (), vertex) == occluded.end ())
{
// point is not in the occluded vector
// PCL_INFO (" VISIBLE!\n");
// load points into a PCL format
pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);
- std::vector<int> visible, occluded;
+ pcl::Indices visible, occluded;
removeOccludedPoints (cloud, filtered_cloud, octree_voxel_size, visible, occluded);
}
pcl::transformPointCloud (*original_cloud, *transformed_cloud, cam_trans.inverse ());
// find occlusions on transformed cloud
- std::vector<int> visible, occluded;
+ pcl::Indices visible, occluded;
removeOccludedPoints (transformed_cloud, filtered_cloud, octree_voxel_size, visible, occluded);
visible_pts = *filtered_cloud;
// ray direction
Eigen::Vector3f direction;
- std::vector<int> indices;
+ pcl::Indices indices;
// point from where we ray-trace
pcl::PointXYZI pt;
nbocc = static_cast<int> (indices.size ());
// TODO need to clean this up and find tricks to remove the aliasing effect on planes
- for (const int &index : indices)
+ for (const auto &index : indices)
{
// if intersected point is on the other side of the camera
if (pt.z * (*input_cloud)[index].z < 0)
pcl::KdTreeFLANN<pcl::PointXY> kdtree;
kdtree.setInputCloud (projections);
- std::vector<int> idxNeighbors;
+ pcl::Indices idxNeighbors;
std::vector<float> neighborsSquaredDistance;
// at first (idx_pcam < current_cam), check if some of the faces attached to previous cameras occlude the current faces
// then (idx_pcam == current_cam), check for self occlusions. At this stage, we skip faces that were already marked as occluded
if (kdtree.radiusSearch (center, radius, idxNeighbors, neighborsSquaredDistance) > 0 )
{
// for each neighbor
- for (const int &idxNeighbor : idxNeighbors)
+ for (const auto &idxNeighbor : idxNeighbors)
{
if (std::max ((*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]].z,
std::max ((*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]].z,
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/surface/boost.h>
#include <pcl/surface/reconstruction.h>
namespace pcl
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/surface/boost.h>
#include <pcl/surface/marching_cubes.h>
namespace pcl
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/surface/boost.h>
#include <pcl/surface/marching_cubes.h>
namespace pcl
MLSProjectionResults () : u (0), v (0) {}
double u; /**< \brief The u-coordinate of the projected point in local MLS frame. */
- double v; /**< \brief The u-coordinate of the projected point in local MLS frame. */
+ double v; /**< \brief The v-coordinate of the projected point in local MLS frame. */
Eigen::Vector3d point; /**< \brief The projected point. */
Eigen::Vector3d normal; /**< \brief The projected point's normal. */
PCL_MAKE_ALIGNED_OPERATOR_NEW
const float a_curvature,
const int a_order);
- /** \brief Given a point calculate it's 3D location in the MLS frame.
+ /** \brief Given a point calculate its 3D location in the MLS frame.
* \param[in] pt The point
* \param[out] u The u-coordinate of the point in local MLS frame.
* \param[out] v The v-coordinate of the point in local MLS frame.
inline void
getMLSCoordinates (const Eigen::Vector3d &pt, double &u, double &v, double &w) const;
- /** \brief Given a point calculate it's 2D location in the MLS frame.
+ /** \brief Given a point calculate its 2D location in the MLS frame.
* \param[in] pt The point
* \param[out] u The u-coordinate of the point in local MLS frame.
* \param[out] v The v-coordinate of the point in local MLS frame.
/** \brief Calculate the polynomial
* \param[in] u The u-coordinate of the point in local MLS frame.
* \param[in] v The v-coordinate of the point in local MLS frame.
- * \return The polynomial value at the provide uv coordinates.
+ * \return The polynomial value at the provided uv coordinates.
*/
inline double
getPolynomialValue (const double u, const double v) const;
/** \brief Calculate the polynomial's first and second partial derivatives.
* \param[in] u The u-coordinate of the point in local MLS frame.
* \param[in] v The v-coordinate of the point in local MLS frame.
- * \return The polynomial partial derivatives at the provide uv coordinates.
+ * \return The polynomial partial derivatives at the provided uv coordinates.
*/
inline PolynomialPartialDerivative
getPolynomialPartialDerivative (const double u, const double v) const;
- /** \brief Calculate the principle curvatures using the polynomial surface.
+ /** \brief Calculate the principal curvatures using the polynomial surface.
* \param[in] u The u-coordinate of the point in local MLS frame.
* \param[in] v The v-coordinate of the point in local MLS frame.
- * \return The principle curvature [k1, k2] at the provided ub coordinates.
- * \note If an error occurs the MLS_MINIMUM_PRINCIPLE_CURVATURE is returned.
+ * \return The principal curvature [k1, k2] at the provided uv coordinates.
+ * \note If an error occurs then 1e-5 is returned.
*/
+ Eigen::Vector2f
+ calculatePrincipalCurvatures (const double u, const double v) const;
+
+ /** \brief Calculate the principal curvatures using the polynomial surface.
+ * \param[in] u The u-coordinate of the point in local MLS frame.
+ * \param[in] v The v-coordinate of the point in local MLS frame.
+ * \return The principal curvature [k1, k2] at the provided uv coordinates.
+ * \note If an error occurs then 1e-5 is returned.
+ */
+ PCL_DEPRECATED(1, 15, "use calculatePrincipalCurvatures() instead")
inline Eigen::Vector2f
- calculatePrincipleCurvatures (const double u, const double v) const;
+ calculatePrincipleCurvatures (const double u, const double v) const { return calculatePrincipalCurvatures(u, v); };
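For callers of the old spelling this is a pure rename: the deprecated inline above simply forwards to the new name until its scheduled removal in 1.15. A minimal migration sketch, assuming an existing MLSResult instance named mls_result:

    // before (deprecated): Eigen::Vector2f k = mls_result.calculatePrincipleCurvatures (u, v);
    Eigen::Vector2f k = mls_result.calculatePrincipalCurvatures (u, v);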
/** \brief Project a point orthogonal to the polynomial surface.
* \param[in] u The u-coordinate of the point in local MLS frame.
* \param[in] pt The point to be projected.
* \param[in] method The projection method to be used.
* \param[in] required_neighbors The minimum number of neighbors required.
- * \note If required_neighbors then any number of neighbors is allowed.
+ * \note If required_neighbors is 0 then any number of neighbors is allowed.
* \note If required_neighbors is not satisfied it projects to the mls plane.
* \return The MLSProjectionResults for the input data.
*/
* \brief Project the query point, about which the mls surface was generated, using the specified method.
* \param[in] method The projection method to be used.
* \param[in] required_neighbors The minimum number of neighbors required.
- * \note If required_neighbors then any number of neighbors is allowed.
+ * \note If required_neighbors is 0 then any number of neighbors is allowed.
* \note If required_neighbors is not satisfied it projects to the mls plane.
* \return The MLSProjectionResults for the input data.
*/
*/
template <typename PointT> void
computeMLSSurface (const pcl::PointCloud<PointT> &cloud,
- int index,
- const std::vector<int> &nn_indices,
+ pcl::index_t index,
+ const pcl::Indices &nn_indices,
double search_radius,
int polynomial_order = 2,
std::function<double(const double)> weight_func = {});
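As a rough sketch of how these pieces fit together: the neighbor search, the ProjectionMethod value, and the assumption that computeMLSSurface/projectQueryPoint are members of MLSResult come from the surrounding header, not from this change:

    #include <pcl/search/kdtree.h>
    #include <pcl/surface/mls.h>   // MLSResult

    void
    projectOnePoint (const pcl::PointCloud<pcl::PointXYZ> &cloud, pcl::index_t query_idx)
    {
      pcl::search::KdTree<pcl::PointXYZ> tree;
      tree.setInputCloud (cloud.makeShared ());

      const double search_radius = 0.03;   // illustrative value
      pcl::Indices nn_indices;
      std::vector<float> nn_sqr_dists;
      tree.radiusSearch (cloud[query_idx], search_radius, nn_indices, nn_sqr_dists);

      pcl::MLSResult mls_result;
      mls_result.computeMLSSurface<pcl::PointXYZ> (cloud, query_idx, nn_indices, search_radius, 2);
      // required_neighbors = 0: any neighborhood size is accepted (see note above)
      const auto proj = mls_result.projectQueryPoint (pcl::MLSResult::ProjectionMethod::ORTHOGONAL, 0);
      (void) proj.point;   // Eigen::Vector3d, the projected location
    }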
using PointCloudInPtr = typename PointCloudIn::Ptr;
using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
- using SearchMethod = std::function<int (int, double, std::vector<int> &, std::vector<float> &)>;
+ using SearchMethod = std::function<int (pcl::index_t, double, pcl::Indices &, std::vector<float> &)>;
enum UpsamplingMethod
{
{
tree_ = tree;
// Declare the search locator definition
- search_method_ = [this] (int index, double radius, std::vector<int>& k_indices, std::vector<float>& k_sqr_distances)
+ search_method_ = [this] (pcl::index_t index, double radius, pcl::Indices& k_indices, std::vector<float>& k_sqr_distances)
{
return tree_->radiusSearch (index, radius, k_indices, k_sqr_distances, 0);
};
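For reference, the lambda above is what gets installed when a caller hands the algorithm a search tree; a minimal usage sketch (radius value illustrative, cloud assumed to be filled elsewhere):

    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);   // filled elsewhere
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
    pcl::MovingLeastSquares<pcl::PointXYZ, pcl::PointNormal> mls;
    mls.setInputCloud (cloud);
    mls.setSearchMethod (tree);     // installs the radius-search lambda shown above
    mls.setSearchRadius (0.03);
    pcl::PointCloud<pcl::PointNormal> smoothed;
    mls.process (smoothed);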
inline int
getPolynomialOrder () const { return (order_); }
- /** \brief Sets whether the surface and normal are approximated using a polynomial, or only via tangent estimation.
- * \param[in] polynomial_fit set to true for polynomial fit
- */
- PCL_DEPRECATED(1, 12, "use setPolynomialOrder() instead")
- inline void
- setPolynomialFit (bool polynomial_fit)
- {
- if (polynomial_fit)
- {
- if (order_ < 2)
- {
- order_ = 2;
- }
- }
- else
- {
- order_ = 0;
- }
- }
-
- /** \brief Get the polynomial_fit value (true if the surface and normal are approximated using a polynomial). */
- PCL_DEPRECATED(1, 12, "use getPolynomialOrder() instead")
- inline bool
- getPolynomialFit () const { return (order_ > 1); }
-
/** \brief Set the sphere radius that is to be used for determining the k-nearest neighbors used for fitting.
* \param[in] radius the sphere radius that is to contain all k-nearest neighbors
* \note Calling this method resets the squared Gaussian parameter to radius * radius !
/** \brief Set whether the mls results should be stored for each point in the input cloud
* \param[in] cache_mls_results True if the mls results should be stored, otherwise false.
- * \note The cache_mls_results_ is forced to true when using upsampling method VOXEL_GRID_DILATION or DISTINCT_CLOUD.
- * \note If memory consumption is a concern set to false when not using upsampling method VOXEL_GRID_DILATION or DISTINCT_CLOUD.
+ * \note The cache_mls_results_ is forced to be true when using upsampling method VOXEL_GRID_DILATION or DISTINCT_CLOUD.
+ * \note If memory consumption is a concern, then set it to false when not using upsampling method VOXEL_GRID_DILATION or DISTINCT_CLOUD.
*/
inline void
setCacheMLSResults (bool cache_mls_results) { cache_mls_results_ = cache_mls_results; }
/** \brief Get the MLSResults for input cloud
* \note The results are only stored if setCacheMLSResults(true) was called or when using the upsampling method DISTINCT_CLOUD or VOXEL_GRID_DILATION.
- * \note This vector is align with the input cloud indices, so use getCorrespondingIndices to get the correct results when using output cloud indices.
+ * \note This vector is aligned with the input cloud indices, so use getCorrespondingIndices to get the correct results when using output cloud indices.
*/
inline const std::vector<MLSResult>&
getMLSResults () const { return (mls_results_); }
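Continuing the usage sketch above: with caching enabled, the per-point fits survive process(), and getCorrespondingIndices (the accessor mentioned in the note, assumed to return the PointIndicesPtr member stored a few lines below) maps output indices back to input indices:

    mls.setCacheMLSResults (true);
    mls.process (smoothed);
    const std::vector<pcl::MLSResult> &fits = mls.getMLSResults ();   // aligned with the input cloud
    const auto corresponding = mls.getCorrespondingIndices ();        // output index -> input index
    const pcl::MLSResult &fit_of_first_output = fits[corresponding->indices[0]];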
int desired_num_points_in_radius_;
/** \brief True if the mls results for the input cloud should be stored
- * \note This is forced to true when using upsampling methods VOXEL_GRID_DILATION or DISTINCT_CLOUD.
+ * \note This is forced to be true when using upsampling methods VOXEL_GRID_DILATION or DISTINCT_CLOUD.
*/
bool cache_mls_results_;
/** \brief Collects for each point in output the corresponding point in the input. */
PointIndicesPtr corresponding_input_indices_;
- /** \brief Search for the closest nearest neighbors of a given point using a radius search
+ /** \brief Search for the nearest neighbors of a given point using a radius search
* \param[in] index the index of the query point
- * \param[out] indices the resultant vector of indices representing the k-nearest neighbors
- * \param[out] sqr_distances the resultant squared distances from the query point to the k-nearest neighbors
+ * \param[out] indices the resultant vector of indices representing the neighbors within search_radius_
+ * \param[out] sqr_distances the resultant squared distances from the query point to the neighbors within search_radius_
*/
inline int
- searchForNeighbors (int index, std::vector<int> &indices, std::vector<float> &sqr_distances) const
+ searchForNeighbors (pcl::index_t index, pcl::Indices &indices, std::vector<float> &sqr_distances) const
{
return (search_method_ (index, search_radius_, indices, sqr_distances));
}
/** \brief Smooth a given point and its neighborhood using Moving Least Squares.
* \param[in] index the index of the query point in the input cloud
* \param[in] nn_indices the set of nearest neighbors indices for pt
- * \param[out] projected_points the set of points projected points around the query point
+ * \param[out] projected_points the set of projected points around the query point
* (in the case of upsampling method NONE, only the query point projected to its own fitted surface will be returned,
* in the case of the other upsampling methods, multiple points will be returned)
* \param[out] projected_points_normals the normals corresponding to the projected points
* (used only in the case of VOXEL_GRID_DILATION or DISTINCT_CLOUD upsampling)
*/
void
- computeMLSPointNormal (int index,
- const std::vector<int> &nn_indices,
+ computeMLSPointNormal (pcl::index_t index,
+ const pcl::Indices &nn_indices,
PointCloudOut &projected_points,
NormalCloud &projected_points_normals,
PointIndices &corresponding_input_indices,
MLSResult &mls_result) const;
- /** \brief This is a helper function for add projected points
+ /** \brief This is a helper function for adding projected points
* \param[in] index the index of the query point in the input cloud
* \param[in] point the projected point to be added
* \param[in] normal the projected point's normal to be added
* \param[out] corresponding_input_indices the set of indices with each point in output having the corresponding point in input
*/
void
- addProjectedPointNormal (int index,
+ addProjectedPointNormal (pcl::index_t index,
const Eigen::Vector3d &point,
const Eigen::Vector3d &normal,
double curvature,
std::string
getClassName () const { return ("MovingLeastSquares"); }
};
-
- template <typename PointInT, typename PointOutT>
- using MovingLeastSquaresOMP PCL_DEPRECATED(1, 12, "use MovingLeastSquares instead, it supports OpenMP now") = MovingLeastSquares<PointInT, PointOutT>;
}
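The removed alias was only a deprecation shim; a sketch of the replacement, assuming the thread-count setter on MovingLeastSquares is setNumberOfThreads as in recent releases:

    // before: pcl::MovingLeastSquaresOMP<pcl::PointXYZ, pcl::PointNormal> mls;
    pcl::MovingLeastSquares<pcl::PointXYZ, pcl::PointNormal> mls;
    mls.setNumberOfThreads (4);   // OpenMP support now lives in MovingLeastSquares itself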
#ifdef PCL_NO_PRECOMPILE
#pragma once
#include <vector>
-#include <list>
-#include <cstdio>
#undef Success
#include <Eigen/StdVector>
#pragma once
#undef Success
-#include <Eigen/Dense>
-
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/surface/on_nurbs/sparse_mat.h>
#include <pcl/surface/3rdparty/opennurbs/opennurbs.h>
#undef Success
-#include <Eigen/Dense>
namespace pcl
{
/** \brief Convert point-cloud */
static unsigned
- PCL2ON (pcl::PointCloud<pcl::PointXYZRGB>::Ptr &pcl_cloud, const std::vector<int> &indices, vector_vec3d &cloud);
+ PCL2ON (pcl::PointCloud<pcl::PointXYZRGB>::Ptr &pcl_cloud, const pcl::Indices &indices, vector_vec3d &cloud);
public:
PCL_MAKE_ALIGNED_OPERATOR_NEW
#include <vector>
#include <map>
-#include <cstdio>
namespace pcl
{
inline bool
getManifold () { return manifold_; }
+ /** \brief Set the number of threads to use.
+ * \param[in] threads the number of threads
+ */
+ void
+ setThreads(int threads);
+
+
+ /** \brief Get the number of threads. */
+ inline int
+ getThreads()
+ {
+ return threads_;
+ }
+
protected:
/** \brief Class get name method. */
std::string
bool show_residual_;
int min_iterations_;
float solver_accuracy_;
+ int threads_;
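A minimal sketch of the new setter in use; apart from setThreads, the calls and values (depth, input type) are assumptions about the existing Poisson API rather than part of this change:

    pcl::PointCloud<pcl::PointNormal>::Ptr cloud_with_normals (new pcl::PointCloud<pcl::PointNormal>);   // filled elsewhere
    pcl::Poisson<pcl::PointNormal> poisson;
    poisson.setInputCloud (cloud_with_normals);
    poisson.setDepth (8);
    poisson.setThreads (4);   // new in this change
    pcl::PolygonMesh mesh;
    poisson.reconstruct (mesh);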
template<int Degree> void
execute (poisson::CoredVectorMeshData &mesh,
extern "C"
{
-#ifdef HAVE_QHULL_2011
-# include "libqhull/libqhull.h"
-# include "libqhull/mem.h"
-# include "libqhull/qset.h"
-# include "libqhull/geom.h"
-# include "libqhull/merge.h"
-# include "libqhull/poly.h"
-# include "libqhull/io.h"
-# include "libqhull/stat.h"
-#else
-# include "qhull/qhull.h"
-# include "qhull/mem.h"
-# include "qhull/qset.h"
-# include "qhull/geom.h"
-# include "qhull/merge.h"
-# include "qhull/poly.h"
-# include "qhull/io.h"
-# include "qhull/stat.h"
-#endif
+#include "libqhull_r/libqhull_r.h"
+#include "libqhull_r/mem_r.h"
+#include "libqhull_r/qset_r.h"
+#include "libqhull_r/geom_r.h"
+#include "libqhull_r/merge_r.h"
+#include "libqhull_r/poly_r.h"
+#include "libqhull_r/io_r.h"
+#include "libqhull_r/stat_r.h"
}
#endif
#include <pcl/pcl_base.h>
#include <pcl/PolygonMesh.h>
-#include <pcl/search/pcl_search.h>
-#include <pcl/conversions.h>
-#include <pcl/surface/boost.h>
+#include <pcl/search/search.h> // for Search
namespace pcl
{
* - \b reconstruct(&PolygonMesh): creates a PolygonMesh object from the input data
*
* \author Radu B. Rusu, Michael Dixon, Alexandru E. Ichim
+ * \ingroup surface
*/
template <typename PointInT>
class PCLSurfaceBase: public PCLBase<PointInT>
#include <vector> // for vector
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
+#include <pcl/types.h> // for pcl::Indices
namespace pcl
{
inline void
simplify (const pcl::PolygonMesh& input, pcl::PolygonMesh& output)
{
- std::vector<int> indices;
+ pcl::Indices indices;
simplify (input, output, indices);
}
* \param[out] indices the resultant vector of indices
*/
void
- simplify (const pcl::PolygonMesh& input, pcl::PolygonMesh& output, std::vector<int>& indices);
+ simplify (const pcl::PolygonMesh& input, pcl::PolygonMesh& output, pcl::Indices& indices);
};
}
#pragma once
#include <pcl/pcl_base.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/search.h> // for Search
namespace pcl
{
#include <pcl/surface/reconstruction.h>
#include <pcl/common/transforms.h>
#include <pcl/TextureMesh.h>
+#include <pcl/octree/octree_search.h> // for OctreePointCloudSearch
namespace pcl
void
removeOccludedPoints (const PointCloudPtr &input_cloud,
PointCloudPtr &filtered_cloud, const double octree_voxel_size,
- std::vector<int> &visible_indices, std::vector<int> &occluded_indices);
+ pcl::Indices &visible_indices, pcl::Indices &occluded_indices);
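With the switch to pcl::Indices, a call site looks like the following sketch; the point type, voxel size, and input cloud are illustrative:

    pcl::TextureMapping<pcl::PointXYZ> tm;
    pcl::PointCloud<pcl::PointXYZ>::Ptr input_cloud (new pcl::PointCloud<pcl::PointXYZ>);   // filled elsewhere
    pcl::PointCloud<pcl::PointXYZ>::Ptr visible_cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::Indices visible_indices, occluded_indices;
    tm.removeOccludedPoints (input_cloud, visible_cloud, 0.01, visible_indices, occluded_indices);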
/** \brief Remove occluded points from a textureMesh
* \param[in] tex_mesh input mesh, on which to perform occlusion detection
memset(&m_rect,0,sizeof(m_rect));
}
-// SDKBREAK Oct 30, 07 - LW
-// This function should not be used any longer
-PCL_DEPRECATED(1, 12, "Use the version that takes a model transform argument")
-bool ON_Annotation2::GetTextXform(
- ON_RECT /*gdi_text_rect*/,
- const ON_Font& /*font*/,
- const ON_DimStyle& /*dimstyle*/,
- double /*dimscale*/,
- const ON_Viewport* /*vp*/,
- ON_Xform& /*xform*/
- ) const
-{
- ON_ERROR("This function should not be used. Use the version that takes a model transform argument.");
- return false;
-
- //const int gdi_height_of_I = font.HeightOfI();
- //const double dimstyle_textheight = dimstyle.TextHeight();
- //double dimstyle_textgap = dimstyle.TextGap();
- //const ON::eTextDisplayMode dimstyle_textalignment
- // = ON::TextDisplayMode(dimstyle.TextAlignment()) ;
- //const ON_3dVector cameraX = (vp) ? vp->CameraX() : m_plane.xaxis;
- //const ON_3dVector cameraY = (vp) ? vp->CameraY() : m_plane.yaxis;
-
- //// SDKBREAK - Oct 4, 07 LW Hack to get correct text gap using
- //// multi-line tolerance text since GetTextXform doesn't do that.
- //if(( dimstyle.ToleranceStyle() == 2 || dimstyle.ToleranceStyle() == 3) &&
- // ( Type() == ON::dtDimLinear || Type() == ON::dtDimAligned))
- // dimstyle_textgap += dimstyle_textheight * 0.5;
-
- //return GetTextXform(
- // gdi_text_rect,
- // gdi_height_of_I,
- // dimstyle_textheight, dimstyle_textgap, dimstyle_textalignment,
- // dimscale,
- // cameraX, cameraY,
- // xform
- // );
-}
-
-// New function added Oct 30, 07 - LW
-// To use model xform to draw annotation in blocks correctly
-#if 0
-PCL_DEPRECATED(1, 12, "Use the version that takes a dimstyle pointer")
-bool ON_Annotation2::GetTextXform(
- ON_RECT gdi_text_rect,
- const ON_Font& font,
- const ON_DimStyle& dimstyle,
- double dimscale,
- const ON_Viewport* vp,
- const ON_Xform* model_xform,
- ON_Xform& xform
- ) const
-{
- ON_ERROR("This function should not be used. Use the one below that takes a dimstyle pointer.");
- return false;
-}
-#endif
-
bool ON_Annotation2::GetTextXform(
ON_RECT gdi_text_rect,
const ON_Font& font,
return rc;
}
-// SDKBREAK Oct 30, 07 - LW
-// This function should not be used any longer
-PCL_DEPRECATED(1, 12, "Use the version that takes a model transform argument")
-bool ON_Annotation2::GetTextXform(
- ON_RECT /*gdi_text_rect*/,
- int /*gdi_height_of_I*/,
- double /*dimstyle_textheight*/,
- double /*dimstyle_textgap*/,
- ON::eTextDisplayMode /*dimstyle_textalignment*/,
- double /*dimscale*/,
- ON_3dVector /*cameraX*/,
- ON_3dVector /*cameraY*/,
- ON_Xform& /*xform*/
- ) const
-{
- ON_ERROR("This function should not be used. Use the version that takes a model transform argument.");
- return false;
-
- //const ON_Annotation2* ann = this;
-
- //const ON::eAnnotationType ann_type = ann->m_type;
-
- //if ( 0 == gdi_height_of_I )
- //{
- // // Default to height of Ariel 'I'
- // gdi_height_of_I = (165*ON_Font::normal_font_height)/256;
- //}
-
- //if ( 0.0 == dimscale )
- //{
- // dimscale = 1.0;
- //}
-
- //dimstyle_textheight *= dimscale;
- //dimstyle_textgap *= dimscale;
-
- //double textheight = ( ON::dtTextBlock == ann_type )
- // ? m_textheight*dimscale
- // : dimstyle_textheight;
- //if ( 0.0 == textheight )
- // textheight = 1.0;
-
- //ON_3dVector cameraZ = ON_CrossProduct( cameraX, cameraY );
- //if ( fabs( 1.0 - cameraZ.Length() ) > ON_SQRT_EPSILON )
- //{
- // cameraZ.Unitize();
- //}
-
- //// This xform is a scale from Windows gdi coordinates
- //// to annotation plane coordinates.
- //const double gdi_to_plane_scale = textheight/gdi_height_of_I;
- //ON_Xform gdi_to_plane(1.0);
- //gdi_to_plane.m_xform[0][0] = gdi_to_plane_scale;
- //gdi_to_plane.m_xform[1][1] = -gdi_to_plane_scale;
-
- //// width and height of text line in Rhino units.
- //const double text_line_width = gdi_to_plane_scale*(gdi_text_rect.right - gdi_text_rect.left);
- ////const double text_line_height = gdi_to_plane_scale*(gdi_text_rect.bottom - gdi_text_rect.top);
-
- //if ( ON::dtTextBlock == ann_type )
- //{
- // // The orientation of the text is text blocks
- // // does not depend on the view or text alignment
- // // settings. The position and orientation of
- // // the text in every other annotation depends on
- // // the view and text alignment settings.
- // //
- // // It simplifies the code for the rest of the
- // // annotation settings to quickly deal with text
- // // blocks here.
- // ON_Xform plane_to_world(1.0);
- // plane_to_world.Rotation(ON_xy_plane,ann->m_plane);
- // xform = plane_to_world*gdi_to_plane;
- // return true;
- //}
-
-
- //// text_position_mode
- //// 1 = linear, aligned, or anglular dimension
- //// (dimension definition determines center point of text box)
- //// 2 = radial, diameter, leader
- //// (dimension definition determined end point of text box)
- //int position_style = 0;
- //switch( ann_type )
- //{
- //case ON::dtDimAligned:
- //case ON::dtDimLinear:
- //case ON::dtDimAngular:
- // // dimension definition determines center point of text box
- // position_style = 1;
- // break;
-
- //case ON::dtLeader:
- //case ON::dtDimRadius:
- //case ON::dtDimDiameter:
- //case ON::dtDimOrdinate:
- // // dimension definition determines end of text box
- // position_style = 2;
- // break;
-
- //case ON::dtTextBlock:
- //case ON::dtNothing:
- // break;
- //}
-
-
- //// This translation puts the center of the fist line of text at
- //// (0,0) in the annotation's plane.
- //if ( ON::dtHorizontal != dimstyle_textalignment || 1 == position_style )
- //{
-
- // gdi_to_plane.m_xform[0][3] = -0.5*text_line_width;
- // gdi_to_plane.m_xform[0][3] = -0.5*text_line_width;
- //}
- //gdi_to_plane.m_xform[1][3] = -0.5*textheight;
-
- //if ( ON::dtHorizontal != dimstyle_textalignment )
- //{
- // if ( ((cameraZ*m_plane.zaxis) < -ON_SQRT_EPSILON) )
- // {
- // // Viewing dimension from the backside
- // ON_Xform flip(1.0);
- // switch ( position_style )
- // {
- // case 1: // ON::dtDimLinear, ON::dtDimAligned, ON::dtDimAngular
- // flip.m_xform[0][0] = -1.0;
- // flip.m_xform[0][3] = gdi_text_rect.left + gdi_text_rect.right;
- // break;
-
- // case 2: // ON::dtDimDiameter, ON::dtDimRadius, ON::dtLeader
- // flip.m_xform[1][1] = -1.0;
- // flip.m_xform[1][3] = gdi_text_rect.top + gdi_text_rect.bottom;
- // break;
- // }
- // gdi_to_plane = gdi_to_plane*flip;
- // }
- //}
-
- //// text_centering_rotation rotates about the "center". Angular,
- //// radial, and leader dimensions use this rotation.
- //ON_2dVector text_centering_rotation(1.0,0.0);
-
- //// text_centering_translation is a small translation deals with
- //// text that is above or to the right of the "center" point.
- //// It is no larger than dimstyle_gap + 1/2 the size of the
- //// text's bounding box.
- //ON_2dVector text_centering_translation(0.0,0.0);
-
- //double x, y;
-
- //if ( ON::dtHorizontal != dimstyle_textalignment )
- //{
- // if ( ON::dtDimLinear == ann_type || ON::dtDimAligned == ann_type )
- // {
- // if ( ON::dtAboveLine == dimstyle_textalignment )
- // {
- // text_centering_translation.y = 0.5*textheight+dimstyle_textgap;
- // }
- // y = ann->m_plane.yaxis*cameraY;
- // x = -ann->m_plane.yaxis*cameraX;
- // if ( fabs(y) <= ON_SQRT_EPSILON && fabs(x) > ON_SQRT_EPSILON )
- // {
- // y = x;
- // }
- // if ( y < 0.0 )
- // {
- // text_centering_translation.Reverse();
- // text_centering_rotation.Reverse(); // rotate 180 degrees
- // }
- // }
- // else if ( ON::dtDimAngular == ann_type )
- // {
- // // This transform rotates the text in the annotation plane.
- // const ON_AngularDimension2* angular_dim = ON_AngularDimension2::Cast(ann);
- // if ( 0 != angular_dim )
- // {
- // double a = 0.5*angular_dim->m_angle;
- // ON_2dVector R(cos(a),sin(a));
- // a -= 0.5*ON_PI;
- // text_centering_rotation.x = cos(a);
- // text_centering_rotation.y = sin(a);
- // ON_3dVector V = R.x*m_plane.xaxis + R.y*m_plane.yaxis;
- // x = V*cameraX;
- // y = V*cameraY;
- // if ( fabs(y) <= ON_SQRT_EPSILON && fabs(x) > ON_SQRT_EPSILON )
- // {
- // y = -x;
- // }
- // if ( y < 0.0 )
- // {
- // text_centering_rotation.Reverse(); // add another 180 degrees of rotation
- // }
-
- // if ( ON::dtAboveLine == dimstyle_textalignment )
- // {
- // y = 0.5*textheight + dimstyle_textgap;
- // text_centering_translation.x = -y*text_centering_rotation.y;
- // text_centering_translation.y = y*text_centering_rotation.x;
- // }
- // }
- // }
- // else if ( ON::dtDimDiameter == ann_type
- // || ON::dtDimRadius == ann_type
- // || ON::dtLeader == ann_type
- // || ON::dtDimOrdinate == ann_type)
- // {
- // ON_2dPoint E(0.0,0.0); // end point
- // ON_2dVector R(1.0,0.0); // unit vector from penultimate point to end point
- // GetLeaderEndAndDirection( this, E, R );
-
- // text_centering_rotation = R;
-
- // text_centering_translation = (dimstyle_textgap + 0.5*text_line_width)*text_centering_rotation;
-
- // ON_3dVector V = text_centering_rotation.x*m_plane.xaxis + text_centering_rotation.y*m_plane.yaxis;
- // x = V*cameraX;
- // y = V*cameraY;
- // if ( fabs(x) <= ON_SQRT_EPSILON && fabs(y) > ON_SQRT_EPSILON )
- // {
- // x = y;
- // }
- // if ( x < 0.0 )
- // {
- // text_centering_rotation.Reverse(); // rotate 180 degrees
- // }
- // }
- //}
-
- //ON_Xform text_centering_xform(1.0);
- //text_centering_xform.m_xform[0][0] = text_centering_rotation.x;
- //text_centering_xform.m_xform[0][1] = -text_centering_rotation.y;
- //text_centering_xform.m_xform[1][0] = text_centering_rotation.y;
- //text_centering_xform.m_xform[1][1] = text_centering_rotation.x;
- //// Since the translation happens after the rotation about (0,0),
- //// we can just tack it on here.
- //text_centering_xform.m_xform[0][3] = text_centering_translation.x;
- //text_centering_xform.m_xform[1][3] = text_centering_translation.y;
-
- //// This transform translates the text in the annotation plane
- //// It can be a large translation
- //ON_2dVector text_offset_translation(0.0,0.0); // CRhinoText::Offset() = text->Offset()
- //switch( ann_type )
- //{
- //case ON::dtDimLinear:
- //case ON::dtDimAligned:
- // if ( m_points.Count() >= ON_LinearDimension2::dim_pt_count )
- // {
- // const ON_LinearDimension2* linear_dim = ON_LinearDimension2::Cast(ann);
- // if ( linear_dim )
- // {
- // text_offset_translation = linear_dim->Dim2dPoint(ON_LinearDimension2::text_pivot_pt);
- // }
- // }
- // break;
-
- //case ON::dtDimAngular:
- // if ( m_points.Count() >= ON_AngularDimension2::dim_pt_count )
- // {
- // const ON_AngularDimension2* angular_dim = ON_AngularDimension2::Cast(ann);
- // if ( angular_dim )
- // {
- // text_offset_translation = angular_dim->Dim2dPoint(ON_AngularDimension2::text_pivot_pt);
- // }
- // }
- // break;
-
- //case ON::dtDimDiameter:
- //case ON::dtDimRadius:
- // if ( m_points.Count() >= ON_RadialDimension2::dim_pt_count )
- // {
- // // No user positioned text on radial dimensions.
- // text_offset_translation = m_points[ON_RadialDimension2::tail_pt_index];
- // }
- // break;
-
- //case ON::dtLeader:
- // if ( m_points.Count() > 0 )
- // {
- // // No user positioned text on leaders.
- // text_offset_translation = *m_points.Last();
- // }
- // break;
-
- //case ON::dtDimOrdinate:
- // if ( m_points.Count() == 2 )
- // {
- // // No user positioned text on leaders.
- // text_offset_translation = m_points[1];
- // }
- // break;
-
- //case ON::dtTextBlock:
- //case ON::dtNothing:
- // break;
- //}
-
- //ON_Xform plane_translation(1.0);
- //plane_translation.m_xform[0][3] = text_offset_translation.x;
- //plane_translation.m_xform[1][3] = text_offset_translation.y;
-
- //// this transform maps a point in the annotation plane to world coordinates
- //ON_Xform plane_to_world(1.0);
- //plane_to_world.Rotation(ON_xy_plane,ann->m_plane);
-
- //ON_Xform horizonal_xform(1.0);
- //if ( ON::dtHorizontal == dimstyle_textalignment )
- //{
- // ON_3dPoint fixed_point = ann->m_plane.PointAt(text_offset_translation.x,text_offset_translation.y);
- // horizonal_xform.Rotation(
- // fixed_point,
- // ann->m_plane.xaxis,
- // ann->m_plane.yaxis,
- // ann->m_plane.zaxis,
- // fixed_point,
- // cameraX,
- // cameraY,
- // cameraZ
- // );
-
- // if ( 2 == position_style )
- // {
- // // leaders, radial, and diameter
- // ON_2dPoint E(0.0,0.0); // end point
- // ON_2dVector R(1.0,0.0); // unit vector from penultimate point to end point
- // GetLeaderEndAndDirection( this, E, R );
- // ON_3dVector V = R.x*m_plane.xaxis + R.y*m_plane.yaxis;
- // x = V*cameraX;
- // y = ( x > -ON_SQRT_EPSILON )
- // ? dimstyle_textgap
- // : -(dimstyle_textgap + text_line_width);
- // V = y*cameraX;
- // horizonal_xform.m_xform[0][3] += V.x;
- // horizonal_xform.m_xform[1][3] += V.y;
- // horizonal_xform.m_xform[2][3] += V.z;
- // }
- //}
-
- //ON_Xform gdi_to_world;
- //gdi_to_world = horizonal_xform
- // * plane_to_world
- // * plane_translation
- // * text_centering_xform
- // * gdi_to_plane;
-
- //xform = gdi_to_world;
-
- //return true;
-}
-
//static bool do_plane_translation = true;
//static bool do_text_centering_xform = true;
//static bool do_text_centering_rotation = true;
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
-#include <math.h>
#include <pcl/surface/3rdparty/poisson4/marching_cubes_poisson.h>
////////////
return;
}
- std::vector<std::uint32_t> remaining_vertices (n_vertices);
+ Indices remaining_vertices (n_vertices);
if (area (vertices.vertices) > 0) // clockwise?
remaining_vertices = vertices.vertices;
else
/////////////////////////////////////////////////////////////////////////////////////////////
float
-pcl::EarClipping::area (const std::vector<std::uint32_t>& vertices)
+pcl::EarClipping::area (const Indices& vertices)
{
//if the polygon is projected onto the xy-plane, the area of the polygon is determined
//by the trapeze formula of Gauss. However, this fails if the projection is a single 'line'.
return area * 0.5f;
}
-
/////////////////////////////////////////////////////////////////////////////////////////////
bool
-pcl::EarClipping::isEar (int u, int v, int w, const std::vector<std::uint32_t>& vertices)
+pcl::EarClipping::isEar (int u, int v, int w, const Indices& vertices)
{
Eigen::Vector3f p_u, p_v, p_w;
p_u = (*points_)[vertices[u]].getVector3fMap();
#include <pcl/point_types.h>
#include <pcl/surface/marching_cubes_hoppe.h>
#include <pcl/surface/impl/marching_cubes_hoppe.hpp>
-#include <pcl/surface/impl/marching_cubes.hpp>
// Instantiations of specific point types
PCL_INSTANTIATE(MarchingCubesHoppe, (pcl::PointNormal)(pcl::PointXYZRGBNormal)(pcl::PointXYZINormal))
*/
#include <pcl/surface/on_nurbs/closing_boundary.h>
+#include <Eigen/Geometry> // for cross
using namespace pcl;
using namespace on_nurbs;
#include <pcl/surface/on_nurbs/fitting_curve_2d.h>
#include <stdexcept>
+#include <Eigen/LU> // for inverse
using namespace pcl;
using namespace on_nurbs;
#include <pcl/surface/on_nurbs/fitting_curve_2d_apdm.h>
#include <pcl/pcl_macros.h>
#include <stdexcept>
+#include <Eigen/Geometry> // for cross
using namespace pcl;
using namespace on_nurbs;
#include <pcl/surface/on_nurbs/fitting_curve_2d_asdm.h>
#include <stdexcept>
+#include <Eigen/Geometry> // for cross
using namespace pcl;
using namespace on_nurbs;
#include <pcl/surface/on_nurbs/fitting_curve_2d_atdm.h>
#include <stdexcept>
+#include <Eigen/Geometry> // for cross
using namespace pcl;
using namespace on_nurbs;
#include <stdexcept>
#include <pcl/surface/on_nurbs/fitting_cylinder_pdm.h>
#include <pcl/pcl_macros.h>
+#include <Eigen/Cholesky> // for ldlt
+#include <Eigen/Geometry> // for cross
+#include <Eigen/LU> // for inverse
using namespace pcl;
using namespace on_nurbs;
#include <stdexcept>
#include <pcl/surface/on_nurbs/fitting_sphere_pdm.h>
#include <pcl/pcl_macros.h>
+#include <Eigen/Cholesky> // for ldlt
+#include <Eigen/Geometry> // for cross
using namespace pcl;
using namespace on_nurbs;
#include <pcl/surface/on_nurbs/fitting_surface_im.h>
#include <pcl/pcl_macros.h>
+#include <Eigen/Cholesky> // for ldlt
#include <stdexcept>
using namespace pcl;
double ds = 1.0 / double (m_indices.size ());
const pcl::PointCloud<pcl::PointXYZRGB> &cloud_ref = *m_cloud;
- for (const int &index : m_indices)
+ for (const auto &index : m_indices)
{
int i = index % cloud_ref.width;
int j = index / cloud_ref.width;
Eigen::Vector4d bb = Eigen::Vector4d (DBL_MAX, 0, DBL_MAX, 0);
const pcl::PointCloud<pcl::PointXYZRGB> &cloud_ref = *cloud;
- for (const int &index : indices)
+ for (const auto &index : indices)
{
int i = index % cloud_ref.width;
int j = index / cloud_ref.width;
// assemble data points
const pcl::PointCloud<pcl::PointXYZRGB> &cloud_ref = *m_cloud;
- for (const int &index : m_indices)
+ for (const auto &index : m_indices)
{
int px = index % cloud_ref.width;
int py = index / cloud_ref.width;
#include <pcl/surface/on_nurbs/fitting_surface_pdm.h>
#include <pcl/pcl_macros.h>
+#include <Eigen/Cholesky> // for ldlt
+#include <Eigen/Geometry> // for cross
+#include <Eigen/LU> // for inverse
#include <stdexcept>
using namespace pcl;
#include <pcl/surface/on_nurbs/fitting_surface_tdm.h>
#include <pcl/pcl_macros.h>
+#include <Eigen/Geometry> // for cross
#include <stdexcept>
using namespace pcl;
#include <pcl/surface/on_nurbs/closing_boundary.h>
#include <pcl/pcl_macros.h>
+#include <Eigen/Geometry> // for cross
#include <stdexcept>
#undef DEBUG
#include <pcl/surface/on_nurbs/closing_boundary.h>
#include <pcl/pcl_macros.h>
+#include <Eigen/Geometry> // for cross
#include <stdexcept>
using namespace pcl;
#include <iostream>
#include <stdexcept>
+#include <Eigen/SVD> // for jacobiSvd
+
#include <pcl/surface/on_nurbs/nurbs_solve.h>
using namespace pcl;
#include <suitesparse/umfpack.h>
#include <iostream>
#include <stdio.h>
+#include <time.h> // for clock, clock_t
#include <stdexcept>
#include <pcl/surface/on_nurbs/nurbs_solve.h>
#include <stdexcept>
#include <map>
#include <algorithm>
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
+#include <Eigen/Geometry> // for cross
#include <pcl/surface/on_nurbs/nurbs_tools.h>
using namespace pcl;
#include <pcl/surface/on_nurbs/sequential_fitter.h>
+#include <Eigen/Geometry> // for cross
+
using namespace pcl;
using namespace on_nurbs;
using namespace Eigen;
}
unsigned
-SequentialFitter::PCL2ON (pcl::PointCloud<pcl::PointXYZRGB>::Ptr &pcl_cloud, const std::vector<int> &indices,
+SequentialFitter::PCL2ON (pcl::PointCloud<pcl::PointXYZRGB>::Ptr &pcl_cloud, const pcl::Indices &indices,
vector_vec3d &on_cloud)
{
std::size_t numPoints = 0;
- for (const int &index : indices)
+ for (const auto &index : indices)
{
pcl::PointXYZRGB &pt = pcl_cloud->at (index);
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
- *
+ *
*
*/
#include <pcl/surface/on_nurbs/sparse_mat.h>
+#include <cstdio> // for printf
using namespace pcl;
using namespace on_nurbs;
#include <pcl/surface/on_nurbs/fitting_curve_2d_apdm.h>
#include <pcl/conversions.h>
+#include <Eigen/Geometry> // for cross
+
using namespace pcl;
using namespace on_nurbs;
#include <iostream>
void
-pcl::surface::SimplificationRemoveUnusedVertices::simplify(const pcl::PolygonMesh& input, pcl::PolygonMesh& output, std::vector<int>& indices)
+pcl::surface::SimplificationRemoveUnusedVertices::simplify(const pcl::PolygonMesh& input, pcl::PolygonMesh& output, pcl::Indices& indices)
{
if (input.polygons.empty ())
return;
#include <pcl/surface/vtk_smoothing/vtk_mesh_smoothing_laplacian.h>
#include <pcl/surface/vtk_smoothing/vtk_utils.h>
-#include <vtkVersion.h>
#include <vtkSmoothPolyDataFilter.h>
#include <pcl/surface/vtk_smoothing/vtk_mesh_smoothing_windowed_sinc.h>
#include <pcl/surface/vtk_smoothing/vtk_utils.h>
-#include <vtkVersion.h>
#include <vtkWindowedSincPolyDataFilter.h>
#include <pcl/surface/vtk_smoothing/vtk_mesh_subdivision.h>
#include <pcl/surface/vtk_smoothing/vtk_utils.h>
-#include <vtkVersion.h>
#include <vtkLinearSubdivisionFilter.h>
#include <vtkLoopSubdivisionFilter.h>
#include <vtkButterflySubdivisionFilter.h>
#include <pcl/PolygonMesh.h>
#include <pcl/conversions.h>
#include <pcl/point_types.h> // for PointXYZ, PointXYZRGB, RGB
-#include <vtkVersion.h>
#include <vtkCellArray.h>
#include <vtkTriangleFilter.h>
#include <vtkPoints.h>
#include <vtkPolyData.h>
#include <vtkPointData.h>
#include <vtkFloatArray.h>
+#include <vtkUnsignedCharArray.h>
// Support for VTK 7.1 upwards
#ifdef vtkGenericDataArray_h
}
mesh.polygons.resize (nr_polygons);
+#ifdef VTK_CELL_ARRAY_V2
+ vtkIdType const *cell_points;
+#else
vtkIdType* cell_points;
+#endif
vtkIdType nr_cell_points;
vtkCellArray * mesh_polygons = poly_data->GetPolys ();
mesh_polygons->InitTraversal ();
#include <fstream>
#include <pcl/point_types.h>
-#include <pcl/pcl_base.h>
#include <pcl/io/pcd_io.h>
char *lena;
set(PCL_CTEST_ARGUMENTS ${PCL_CTEST_ARGUMENTS} -C $<$<CONFIG:Debug>:Debug>$<$<CONFIG:Release>:Release>)
endif()
+# Enables you to disable visualization tests. Used on CI.
+if(PCL_DISABLE_VISUALIZATION_TESTS)
+ list(APPEND EXCLUDE_TESTS visualization)
+endif()
+
# Enables you to disable GPU tests. Used on CI as it has no access to GPU hardware
if(PCL_DISABLE_GPU_TESTS)
- set(PCL_CTEST_ARGUMENTS ${PCL_CTEST_ARGUMENTS} -E gpu)
+ list(APPEND EXCLUDE_TESTS gpu)
+endif()
+
+# Check if there are any tests to exclude
+if(EXCLUDE_TESTS)
+ message(STATUS "Tests excluded: ${EXCLUDE_TESTS}")
+ string(REPLACE ";" "|" EXCLUDE_TESTS_REGEX "${EXCLUDE_TESTS}")
+ set(PCL_CTEST_ARGUMENTS ${PCL_CTEST_ARGUMENTS} -E "(${EXCLUDE_TESTS_REGEX})")
endif()
add_custom_target(tests "${CMAKE_CTEST_COMMAND}" ${PCL_CTEST_ARGUMENTS} -V -T Test VERBATIM)
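For reference, with both PCL_DISABLE_VISUALIZATION_TESTS and PCL_DISABLE_GPU_TESTS enabled, the excluded suites are folded into a single regex, so the target above effectively runs ctest -E "(visualization|gpu)" -V -T Test.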
add_subdirectory(io)
add_subdirectory(kdtree)
add_subdirectory(keypoints)
+add_subdirectory(ml)
add_subdirectory(people)
add_subdirectory(octree)
add_subdirectory(outofcore)
PCL_ADD_TEST(common_test_macros test_macros FILES test_macros.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_vector_average test_vector_average FILES test_vector_average.cpp LINK_WITH pcl_gtest)
PCL_ADD_TEST(common_common test_common FILES test_common.cpp LINK_WITH pcl_gtest pcl_common)
+PCL_ADD_TEST(common_pointcloud test_pointcloud FILES test_pointcloud.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_parse test_parse FILES test_parse.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_geometry test_geometry FILES test_geometry.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_copy_point test_copy_point FILES test_copy_point.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_bearing_angle_image test_bearing_angle_image FILES test_bearing_angle_image.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_polygon_mesh test_polygon_mesh_concatenate FILES test_polygon_mesh.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_point_type_conversion test_common_point_type_conversion FILES test_point_type_conversion.cpp LINK_WITH pcl_gtest pcl_common)
+PCL_ADD_TEST(common_point_type_static_member_functions test_common_point_type_static_member_functions FILES test_point_type_static_member_functions.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_colors test_colors FILES test_colors.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_type_traits test_type_traits FILES test_type_traits.cpp LINK_WITH pcl_gtest pcl_common)
*/
#include <pcl/test/gtest.h>
-#include <pcl/common/eigen.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/io/pcd_io.h>
*
*/
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
+
#include <pcl/test/gtest.h>
#include <pcl/pcl_tests.h>
#include <pcl/common/common.h>
}
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, PointCloud)
-{
- PointCloud<PointXYZ> cloud;
- cloud.width = 640;
- cloud.height = 480;
-
- EXPECT_TRUE (cloud.isOrganized ());
-
- cloud.height = 1;
- EXPECT_FALSE (cloud.isOrganized ());
-
- cloud.width = 10;
- for (std::uint32_t i = 0; i < cloud.width*cloud.height; ++i)
- {
- float j = static_cast<float> (i);
- cloud.points.emplace_back(3.0f * j + 0.0f, 3.0f * j + 1.0f, 3.0f * j + 2.0f);
- }
-
- Eigen::MatrixXf mat_xyz1 = cloud.getMatrixXfMap ();
- Eigen::MatrixXf mat_xyz = cloud.getMatrixXfMap (3, 4, 0);
-
- if (Eigen::MatrixXf::Flags & Eigen::RowMajorBit)
- {
- EXPECT_EQ (mat_xyz1.cols (), 4);
- EXPECT_EQ (mat_xyz1.rows (), cloud.width);
- EXPECT_EQ (mat_xyz1 (0, 0), 0);
- EXPECT_EQ (mat_xyz1 (cloud.width - 1, 2), 3 * cloud.width - 1); // = 29
-
- EXPECT_EQ (mat_xyz.cols (), 3);
- EXPECT_EQ (mat_xyz.rows (), cloud.width);
- EXPECT_EQ (mat_xyz (0, 0), 0);
- EXPECT_EQ (mat_xyz (cloud.width - 1, 2), 3 * cloud.width - 1); // = 29
- }
- else
- {
- EXPECT_EQ (mat_xyz1.cols (), cloud.width);
- EXPECT_EQ (mat_xyz1.rows (), 4);
- EXPECT_EQ (mat_xyz1 (0, 0), 0);
- EXPECT_EQ (mat_xyz1 (2, cloud.width - 1), 3 * cloud.width - 1); // = 29
-
- EXPECT_EQ (mat_xyz.cols (), cloud.width);
- EXPECT_EQ (mat_xyz.rows (), 3);
- EXPECT_EQ (mat_xyz (0, 0), 0);
- EXPECT_EQ (mat_xyz (2, cloud.width - 1), 3 * cloud.width - 1); // = 29
- }
-
-#ifdef NDEBUG
- if (Eigen::MatrixXf::Flags & Eigen::RowMajorBit)
- {
- Eigen::MatrixXf mat_yz = cloud.getMatrixXfMap (2, 4, 1);
- EXPECT_EQ (mat_yz.cols (), 2);
- EXPECT_EQ (mat_yz.rows (), cloud.width);
- EXPECT_EQ (mat_yz (0, 0), 1);
- EXPECT_EQ (mat_yz (cloud.width - 1, 1), 3 * cloud.width - 1);
- std::uint32_t j = 1;
- for (std::uint32_t i = 1; i < cloud.width*cloud.height; i+=4, j+=3)
- {
- Eigen::MatrixXf mat_yz = cloud.getMatrixXfMap (2, 4, i);
- EXPECT_EQ (mat_yz.cols (), 2);
- EXPECT_EQ (mat_yz.rows (), cloud.width);
- EXPECT_EQ (mat_yz (0, 0), j);
- }
- }
- else
- {
- Eigen::MatrixXf mat_yz = cloud.getMatrixXfMap (2, 4, 1);
- EXPECT_EQ (mat_yz.cols (), cloud.width);
- EXPECT_EQ (mat_yz.rows (), 2);
- EXPECT_EQ (mat_yz (0, 0), 1);
- EXPECT_EQ (mat_yz (1, cloud.width - 1), 3 * cloud.width - 1);
- std::uint32_t j = 1;
- for (std::uint32_t i = 1; i < cloud.width*cloud.height; i+=4, j+=3)
- {
- Eigen::MatrixXf mat_yz = cloud.getMatrixXfMap (2, 4, i);
- EXPECT_EQ (mat_yz.cols (), cloud.width);
- EXPECT_EQ (mat_yz.rows (), 2);
- EXPECT_EQ (mat_yz (0, 0), j);
- }
- }
-#endif
- cloud.clear ();
- EXPECT_EQ (cloud.width, 0);
- EXPECT_EQ (cloud.height, 0);
-
- cloud.width = 640;
- cloud.height = 480;
-
- cloud.insert (cloud.end (), PointXYZ (1, 1, 1));
- EXPECT_FALSE (cloud.isOrganized ());
- EXPECT_EQ (cloud.width, 1);
-
- cloud.insert (cloud.end (), 5, PointXYZ (1, 1, 1));
- EXPECT_FALSE (cloud.isOrganized ());
- EXPECT_EQ (cloud.width, 6);
-
- cloud.erase (cloud.end () - 1);
- EXPECT_FALSE (cloud.isOrganized ());
- EXPECT_EQ (cloud.width, 5);
-
- cloud.erase (cloud.begin (), cloud.end ());
- EXPECT_FALSE (cloud.isOrganized ());
- EXPECT_EQ (cloud.width, 0);
-
- cloud.emplace (cloud.end (), 1, 1, 1);
- EXPECT_FALSE (cloud.isOrganized ());
- EXPECT_EQ (cloud.width, 1);
-
- auto& new_point = cloud.emplace_back (1, 1, 1);
- EXPECT_FALSE (cloud.isOrganized ());
- EXPECT_EQ (cloud.width, 2);
- EXPECT_EQ (&new_point, &cloud.back ());
-}
-
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, PointTypes)
{
EXPECT_FALSE ((pcl::traits::has_label<pcl::Normal>::value));
}
+TEST (PCL, GetMinMax3D)
+{
+ pcl::PointCloud<pcl::PointXYZ> cloud;
+ cloud.emplace_back ( 0.0f, 0.0f, 0.0f);
+ cloud.emplace_back (10.0f, -10000.0f, 1.0f);
+ cloud.emplace_back ( 5.0f, 5.0f, 0.0f);
+ cloud.emplace_back (-5.0f, 0.0f, -0.5f);
+
+ pcl::PointXYZ min_pt, max_pt;
+ Eigen::Vector4f min_vec, max_vec;
+
+ pcl::getMinMax3D (cloud, min_pt, max_pt);
+ EXPECT_EQ (min_pt.x, -5.0f);
+ EXPECT_EQ (min_pt.y, -10000.0f);
+ EXPECT_EQ (min_pt.z, -0.5f);
+ EXPECT_EQ (max_pt.x, 10.0f);
+ EXPECT_EQ (max_pt.y, 5.0f);
+ EXPECT_EQ (max_pt.z, 1.0f);
+
+ pcl::getMinMax3D (cloud, min_vec, max_vec);
+ EXPECT_EQ (min_vec.x (), -5.0f);
+ EXPECT_EQ (min_vec.y (), -10000.0f);
+ EXPECT_EQ (min_vec.z (), -0.5f);
+ EXPECT_EQ (max_vec.x (), 10.0f);
+ EXPECT_EQ (max_vec.y (), 5.0f);
+ EXPECT_EQ (max_vec.z (), 1.0f);
+
+ pcl::PointIndices pindices;
+ pindices.indices.push_back (0);
+ pindices.indices.push_back (2);
+ pcl::getMinMax3D (cloud, pindices, min_vec, max_vec);
+ EXPECT_EQ (min_vec.x (), 0.0f);
+ EXPECT_EQ (min_vec.y (), 0.0f);
+ EXPECT_EQ (min_vec.z (), 0.0f);
+ EXPECT_EQ (max_vec.x (), 5.0f);
+ EXPECT_EQ (max_vec.y (), 5.0f);
+ EXPECT_EQ (max_vec.z (), 0.0f);
+
+ pcl::Indices indices;
+ indices.push_back (1);
+ indices.push_back (3);
+ pcl::getMinMax3D (cloud, indices, min_vec, max_vec);
+ EXPECT_EQ (min_vec.x (), -5.0f);
+ EXPECT_EQ (min_vec.y (), -10000.0f);
+ EXPECT_EQ (min_vec.z (), -0.5f);
+ EXPECT_EQ (max_vec.x (), 10.0f);
+ EXPECT_EQ (max_vec.y (), 0.0f);
+ EXPECT_EQ (max_vec.z (), 1.0f);
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, GetMaxDistance)
{
const Eigen::Vector4f pivot_pt (Eigen::Vector4f::Zero ());
// populate cloud
- cloud.points.resize (3);
+ cloud.resize (3);
cloud[0].data[0] = 4.f; cloud[0].data[1] = 3.f;
cloud[0].data[2] = 0.f; cloud[0].data[3] = 0.f;
cloud[1].data[0] = 0.f; cloud[1].data[1] = 0.f;
test::EXPECT_EQ_VECTORS (max_exp_pt, max_pt);
}
+TEST (PCL, computeMedian)
+{
+ std::vector<float> vector1{4.0f, 2.0f, 1.0f, 5.0f, 3.0f, 6.0f};
+ const auto median1 = computeMedian (vector1.begin (), vector1.end ());
+ EXPECT_EQ(median1, 3.5f);
+
+ std::vector<double> vector2{1.0, 25.0, 9.0, 4.0, 16.0};
+ const auto median2 = computeMedian (vector2.begin (), vector2.end (), [](const double& x){ return std::sqrt(x); });
+ EXPECT_EQ(median2, 3.0);
+
+ std::vector<double> vector3{1.0, 2.0, 6.0, 5.0, 4.0, 3.0};
+ const auto median3 = computeMedian (vector3.begin (), vector3.end (), [](const double& x){ return x+1.0; });
+ EXPECT_EQ(median3, 4.5);
+
+ std::vector<int> vector4{-1, 1, 2, 9, 15, 16};
+ const auto median4 = computeMedian (vector4.begin (), vector4.end ());
+ EXPECT_EQ(median4, 5);
+
+ std::vector<int> vector5{-1, 1, 2, 9, 15, 16};
+ const auto median5 = computeMedian (vector5.begin (), vector5.end (), [](const int& x){ return static_cast<double>(x); });
+ EXPECT_EQ(median5, 5.5);
+}
+
/* ---[ */
int
main (int argc, char** argv)
EXPECT_EQ (cloud_xyz_rgba[i].rgba, cloud_xyz_rgb_normal[i].rgba);
}
- IndicesAllocator< Eigen::aligned_allocator<int> > indices_aligned;
+ IndicesAllocator< Eigen::aligned_allocator<pcl::index_t> > indices_aligned;
indices_aligned.push_back (1); indices_aligned.push_back (2); indices_aligned.push_back (3);
pcl::copyPointCloud (cloud_xyz_rgba, indices_aligned, cloud_xyz_rgb_normal);
ASSERT_EQ (int (cloud_xyz_rgb_normal.size ()), 3);
}
}
-///////////////////////////////////////////////////////////////////////////////////////////
-// Ignore unknown pragma warning on MSVC (4996)
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable: 4068)
-#endif
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#pragma GCC diagnostic push
-TEST (PCL, concatenatePointCloud)
-{
- CloudXYZRGBA cloud_xyz_rgba;
- cloud_xyz_rgba.push_back (pt_xyz_rgba);
- cloud_xyz_rgba.push_back (pt_xyz_rgba);
- cloud_xyz_rgba.push_back (pt_xyz_rgba);
- cloud_xyz_rgba.push_back (pt_xyz_rgba);
- cloud_xyz_rgba.push_back (pt_xyz_rgba);
-
- CloudXYZRGBA cloud_xyz_rgba2;
- cloud_xyz_rgba2.push_back (pt_xyz_rgba2);
- cloud_xyz_rgba2.push_back (pt_xyz_rgba2);
-
- pcl::PCLPointCloud2 cloud1, cloud2, cloud_out, cloud_out2, cloud_out3, cloud_out4;
- pcl::toPCLPointCloud2 (cloud_xyz_rgba, cloud1);
- pcl::toPCLPointCloud2 (cloud_xyz_rgba2, cloud2);
-
- // Regular
- pcl::concatenatePointCloud (cloud1, cloud2, cloud_out);
-
- CloudXYZRGBA cloud_all;
- pcl::fromPCLPointCloud2 (cloud_out, cloud_all);
-
- EXPECT_EQ (cloud_all.size (), cloud_xyz_rgba.size () + cloud_xyz_rgba2.size ());
- for (int i = 0; i < int (cloud_xyz_rgba.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- EXPECT_RGB_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- EXPECT_EQ (cloud_all[i].rgba, cloud_xyz_rgba[i].rgba);
- }
- for (int i = 0; i < int (cloud_xyz_rgba2.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[cloud_xyz_rgba.size () + i], cloud_xyz_rgba2[i]);
- EXPECT_RGB_EQ (cloud_all[cloud_xyz_rgba.size () + i], cloud_xyz_rgba2[i]);
- EXPECT_EQ (cloud_all[cloud_xyz_rgba.size () + i].rgba, cloud_xyz_rgba2[i].rgba);
- }
-
- // RGB != RGBA
- CloudXYZRGB cloud_xyz_rgb;
- cloud_xyz_rgb.push_back (pt_xyz_rgb);
- cloud_xyz_rgb.push_back (pt_xyz_rgb);
-
- pcl::toPCLPointCloud2 (cloud_xyz_rgb, cloud2);
- pcl::concatenatePointCloud (cloud1, cloud2, cloud_out2);
-
- pcl::fromPCLPointCloud2 (cloud_out2, cloud_all);
-
- EXPECT_EQ (cloud_all.size (), cloud_xyz_rgba.size () + cloud_xyz_rgba2.size ());
- for (int i = 0; i < int (cloud_xyz_rgba.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- EXPECT_RGB_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- EXPECT_EQ (cloud_all[i].rgba, cloud_xyz_rgba[i].rgba);
- }
- for (int i = 0; i < int (cloud_xyz_rgb.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[cloud_xyz_rgba.size () + i], cloud_xyz_rgb[i]);
- EXPECT_RGB_EQ (cloud_all[cloud_xyz_rgba.size () + i], cloud_xyz_rgb[i]);
- EXPECT_EQ (cloud_all[cloud_xyz_rgba.size () + i].rgba, cloud_xyz_rgb[i].rgba);
- }
-
- // _ vs regular
- int rgb_idx = pcl::getFieldIndex (cloud1, "rgba");
- cloud1.fields[rgb_idx].name = "_";
- pcl::concatenatePointCloud (cloud1, cloud2, cloud_out3);
-
- pcl::fromPCLPointCloud2 (cloud_out3, cloud_all);
-
- EXPECT_EQ (cloud_all.size (), cloud_xyz_rgba.size () + cloud_xyz_rgba2.size ());
- for (int i = 0; i < int (cloud_xyz_rgba.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- // Data doesn't get modified
- EXPECT_RGB_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- EXPECT_EQ (cloud_all[i].rgba, cloud_xyz_rgba[i].rgba);
- }
- for (int i = 0; i < int (cloud_xyz_rgb.size ()); ++i)
- EXPECT_XYZ_EQ (cloud_all[cloud_xyz_rgba.size () + i], cloud_xyz_rgb[i]);
-
- cloud1.fields[rgb_idx].name = "rgba";
- // regular vs _
- rgb_idx = pcl::getFieldIndex (cloud2, "rgb");
- cloud2.fields[rgb_idx].name = "_";
- pcl::concatenatePointCloud (cloud1, cloud2, cloud_out4);
-
- pcl::fromPCLPointCloud2 (cloud_out4, cloud_all);
-
- EXPECT_EQ (cloud_all.size (), cloud_xyz_rgba.size () + cloud_xyz_rgba2.size ());
- for (int i = 0; i < int (cloud_xyz_rgba.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- // Data doesn't get modified
- EXPECT_RGB_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- EXPECT_EQ (cloud_all[i].rgba, cloud_xyz_rgba[i].rgba);
- }
- for (int i = 0; i < int (cloud_xyz_rgb.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[cloud_xyz_rgba.size () + i], cloud_xyz_rgb[i]);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].r, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].g, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].b, 0);
- EXPECT_EQ (cloud_all[cloud_xyz_rgba.size () + i].rgba, 0);
- }
-
- // _ vs _
- rgb_idx = pcl::getFieldIndex (cloud1, "rgba");
- cloud1.fields[rgb_idx].name = "_";
- pcl::toPCLPointCloud2 (cloud_xyz_rgb, cloud2);
- rgb_idx = pcl::getFieldIndex (cloud2, "rgb");
- cloud2.fields[rgb_idx].name = "_";
-
- pcl::concatenatePointCloud (cloud1, cloud2, cloud_out3);
-
- pcl::fromPCLPointCloud2 (cloud_out3, cloud_all);
-
- EXPECT_EQ (cloud_all.size (), cloud_xyz_rgba.size () + cloud_xyz_rgba2.size ());
- for (int i = 0; i < int (cloud_xyz_rgba.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- // Data doesn't get modified
- EXPECT_RGB_EQ (cloud_all[i], cloud_xyz_rgba[i]);
- EXPECT_EQ (cloud_all[i].rgba, cloud_xyz_rgba[i].rgba);
- }
- for (int i = 0; i < int (cloud_xyz_rgb.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[cloud_xyz_rgba.size () + i], cloud_xyz_rgb[i]);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].r, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].g, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].b, 0);
- EXPECT_EQ (cloud_all[cloud_xyz_rgba.size () + i].rgba, 0);
- }
-
- cloud1.fields[rgb_idx].name = "rgba";
- // _ vs regular
- rgb_idx = pcl::getFieldIndex (cloud1, "rgba");
-
- cloud1.fields[rgb_idx].name = "_";
- pcl::toPCLPointCloud2 (cloud_xyz_rgb, cloud2);
- pcl::concatenatePointCloud (cloud2, cloud1, cloud_out3);
-
- pcl::fromPCLPointCloud2 (cloud_out3, cloud_all);
-
- EXPECT_EQ (cloud_all.size (), cloud_xyz_rgba.size () + cloud_xyz_rgba2.size ());
- for (int i = 0; i < int (cloud_xyz_rgb.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[i], cloud_xyz_rgb[i]);
- // Data doesn't get modified
- EXPECT_RGB_EQ (cloud_all[i], cloud_xyz_rgb[i]);
- EXPECT_EQ (cloud_all[i].rgba, cloud_xyz_rgb[i].rgba);
- }
- for (int i = 0; i < int (cloud_xyz_rgba.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[cloud_xyz_rgb.size () + i], cloud_xyz_rgba[i]);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgb.size () + i].r, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgb.size () + i].g, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgb.size () + i].b, 0);
- EXPECT_EQ (cloud_all[cloud_xyz_rgb.size () + i].rgba, 0);
- }
-
- cloud1.fields[rgb_idx].name = "rgba";
- // regular vs _
- rgb_idx = pcl::getFieldIndex (cloud2, "rgb");
- cloud2.fields[rgb_idx].name = "_";
- pcl::concatenatePointCloud (cloud2, cloud1, cloud_out4);
-
- pcl::fromPCLPointCloud2 (cloud_out4, cloud_all);
-
- EXPECT_EQ (cloud_all.size (), cloud_xyz_rgba.size () + cloud_xyz_rgba2.size ());
- for (int i = 0; i < int (cloud_xyz_rgb.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[i], cloud_xyz_rgb[i]);
- // Data doesn't get modified
- EXPECT_RGB_EQ (cloud_all[i], cloud_xyz_rgb[i]);
- EXPECT_EQ (cloud_all[i].rgba, cloud_xyz_rgb[i].rgba);
- }
- for (int i = 0; i < int (cloud_xyz_rgba.size ()); ++i)
- {
- EXPECT_XYZ_EQ (cloud_all[cloud_xyz_rgb.size () + i], cloud_xyz_rgba[i]);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgb.size () + i].r, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgb.size () + i].g, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgb.size () + i].b, 0);
- EXPECT_EQ (cloud_all[cloud_xyz_rgb.size () + i].rgba, 0);
- }
-}
-#pragma GCC diagnostic pop
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
-
///////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, concatenatePointCloud2)
{
*/
#include <pcl/test/gtest.h>
-#include <pcl/common/common.h>
#include <pcl/common/intersections.h>
#include <pcl/pcl_tests.h>
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception Inc.
+ *
+ * All rights reserved
+ */
+
+#include <pcl/test/gtest.h>
+#include <pcl/pcl_macros.h>
+#include <pcl/pcl_tests.h>
+#include <pcl/point_types.h>
+#include <pcl/type_traits.h>
+
+using namespace pcl;
+using namespace pcl::test;
+
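+// Every descriptor/feature point type (plus Histogram<1>) is run through the
+// typed test below, which checks that the static descriptorSize() member
+// agrees with the compile-time descriptorSize_v trait.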
+template <typename T> class PointTypeStaticMemberFunctionsTest : public testing::Test { };
+using FeaturePointTypes = ::testing::Types<BOOST_PP_SEQ_ENUM (PCL_DESCRIPTOR_FEATURE_POINT_TYPES), Histogram<1>>;
+TYPED_TEST_SUITE (PointTypeStaticMemberFunctionsTest, FeaturePointTypes);
+TYPED_TEST (PointTypeStaticMemberFunctionsTest, DescriptorSizeTests)
+{
+ static_assert
+ (
+ TypeParam::descriptorSize() == pcl::detail::traits::descriptorSize_v<TypeParam>,
+ "incorrect descriptorSize"
+ );
+}
+
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2021-, Open Perception
+ *
+ * All rights reserved
+ */
+
+#include <pcl/test/gtest.h>
+#include <pcl/pcl_tests.h>
+#include <pcl/point_types.h>
+#include <pcl/point_cloud.h>
+
+using namespace pcl;
+
+//////////////////////////////////////////////
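+// Fixture providing a fresh, empty PointXYZ cloud for every test case.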
+struct pointCloudTest : public testing::Test {
+ protected:
+ PointCloud<PointXYZ> cloud;
+};
+
+TEST_F (pointCloudTest, is_organized)
+{
+ cloud.width = 640;
+ cloud.height = 480;
+ EXPECT_TRUE (cloud.isOrganized ());
+}
+
+TEST_F (pointCloudTest, not_organized)
+{
+ cloud.width = 640;
+ cloud.height = 1;
+ EXPECT_FALSE (cloud.isOrganized ());
+}
+
+TEST_F (pointCloudTest, getMatrixXfMap)
+{
+ cloud.height = 1;
+ cloud.width = 10;
+ for (std::uint32_t i = 0; i < cloud.width*cloud.height; ++i)
+ {
+ float j = static_cast<float> (i);
+ cloud.emplace_back(3.0f * j + 0.0f, 3.0f * j + 1.0f, 3.0f * j + 2.0f);
+ }
+
+ Eigen::MatrixXf mat_xyz1 = cloud.getMatrixXfMap ();
+ Eigen::MatrixXf mat_xyz = cloud.getMatrixXfMap (3, 4, 0);
+
+ if (Eigen::MatrixXf::Flags & Eigen::RowMajorBit)
+ {
+ EXPECT_EQ (mat_xyz1.cols (), 4);
+ EXPECT_EQ (mat_xyz1.rows (), cloud.width);
+ EXPECT_EQ (mat_xyz1 (0, 0), 0);
+ EXPECT_EQ (mat_xyz1 (cloud.width - 1, 2), 3 * cloud.width - 1); // = 29
+
+ EXPECT_EQ (mat_xyz.cols (), 3);
+ EXPECT_EQ (mat_xyz.rows (), cloud.width);
+ EXPECT_EQ (mat_xyz (0, 0), 0);
+ EXPECT_EQ (mat_xyz (cloud.width - 1, 2), 3 * cloud.width - 1); // = 29
+ }
+ else
+ {
+ EXPECT_EQ (mat_xyz1.cols (), cloud.width);
+ EXPECT_EQ (mat_xyz1.rows (), 4);
+ EXPECT_EQ (mat_xyz1 (0, 0), 0);
+ EXPECT_EQ (mat_xyz1 (2, cloud.width - 1), 3 * cloud.width - 1); // = 29
+
+ EXPECT_EQ (mat_xyz.cols (), cloud.width);
+ EXPECT_EQ (mat_xyz.rows (), 3);
+ EXPECT_EQ (mat_xyz (0, 0), 0);
+ EXPECT_EQ (mat_xyz (2, cloud.width - 1), 3 * cloud.width - 1); // = 29
+ }
+
+#ifdef NDEBUG
+ if (Eigen::MatrixXf::Flags & Eigen::RowMajorBit)
+ {
+ Eigen::MatrixXf mat_yz = cloud.getMatrixXfMap (2, 4, 1);
+ EXPECT_EQ (mat_yz.cols (), 2);
+ EXPECT_EQ (mat_yz.rows (), cloud.width);
+ EXPECT_EQ (mat_yz (0, 0), 1);
+ EXPECT_EQ (mat_yz (cloud.width - 1, 1), 3 * cloud.width - 1);
+ std::uint32_t j = 1;
+ for (std::uint32_t i = 1; i < cloud.width*cloud.height; i+=4, j+=3)
+ {
+ Eigen::MatrixXf mat_yz = cloud.getMatrixXfMap (2, 4, i);
+ EXPECT_EQ (mat_yz.cols (), 2);
+ EXPECT_EQ (mat_yz.rows (), cloud.width);
+ EXPECT_EQ (mat_yz (0, 0), j);
+ }
+ }
+ else
+ {
+ Eigen::MatrixXf mat_yz = cloud.getMatrixXfMap (2, 4, 1);
+ EXPECT_EQ (mat_yz.cols (), cloud.width);
+ EXPECT_EQ (mat_yz.rows (), 2);
+ EXPECT_EQ (mat_yz (0, 0), 1);
+ EXPECT_EQ (mat_yz (1, cloud.width - 1), 3 * cloud.width - 1);
+ std::uint32_t j = 1;
+ for (std::uint32_t i = 1; i < cloud.width*cloud.height; i+=4, j+=3)
+ {
+ Eigen::MatrixXf mat_yz = cloud.getMatrixXfMap (2, 4, i);
+ EXPECT_EQ (mat_yz.cols (), cloud.width);
+ EXPECT_EQ (mat_yz.rows (), 2);
+ EXPECT_EQ (mat_yz (0, 0), j);
+ }
+ }
+#endif
+}
+
+TEST_F (pointCloudTest, clear)
+{
+ cloud.insert (cloud.end (), PointXYZ (1, 1, 1));
+ EXPECT_EQ (cloud.size(), 1);
+ cloud.clear ();
+ EXPECT_EQ (cloud.width, 0);
+ EXPECT_EQ (cloud.height, 0);
+}
+
+TEST_F (pointCloudTest, insert)
+{
+ cloud.insert (cloud.end (), PointXYZ (1, 1, 1));
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 1);
+}
+
+TEST_F (pointCloudTest, insert_with_height)
+{
+ cloud.insert (cloud.end (), 5, PointXYZ (1, 1, 1));
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 5);
+}
+
+TEST_F (pointCloudTest, erase_at_position)
+{
+ cloud.insert (cloud.end (), 5, PointXYZ (1, 1, 1));
+ cloud.erase (cloud.end () - 1);
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 4);
+}
+
+TEST_F (pointCloudTest, erase_with_iterator)
+{
+ cloud.insert (cloud.end (), 5, PointXYZ (1, 1, 1));
+ cloud.erase (cloud.begin (), cloud.end ());
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 0);
+}
+
+TEST_F (pointCloudTest, emplace)
+{
+ cloud.emplace (cloud.end (), 1, 1, 1);
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 1);
+}
+
+TEST_F (pointCloudTest, emplace_back)
+{
+ auto& new_point = cloud.emplace_back (1, 1, 1);
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 1);
+ EXPECT_EQ (&new_point, &cloud.back ());
+}
+
+TEST_F (pointCloudTest, resize_with_count_elements)
+{
+ cloud.resize (640*360);
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640*360);
+}
+
+TEST_F (pointCloudTest, resize_with_new_width_and_height)
+{
+ cloud.resize (640, 480);
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+ EXPECT_EQ (cloud.height, 480);
+}
+
+TEST_F (pointCloudTest, resize_with_initialized_count_elements)
+{
+ cloud.resize (640*360, PointXYZ (1, 1, 1));
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640*360);
+}
+
+TEST_F (pointCloudTest, resize_with_initialized_count_and_new_width_and_height)
+{
+ cloud.resize (640, 480, PointXYZ (1, 1, 1));
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+}
+
+TEST_F (pointCloudTest, assign_with_copies)
+{
+ cloud.assign (640*360, PointXYZ (1, 1, 1));
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640*360);
+}
+
+TEST_F (pointCloudTest, assign_with_new_width_and_height_copies)
+{
+ cloud.assign(640, 480, PointXYZ (1, 1, 1));
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+}
+
+TEST_F (pointCloudTest, assign_with_copies_in_range)
+{
+ std::vector<PointXYZ> pointVec;
+ pointVec.resize (640*360, PointXYZ (2, 3, 4));
+ cloud.assign (pointVec.begin(), pointVec.end());
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640*360);
+}
+
+TEST_F (pointCloudTest, assign_with_copies_in_range_and_new_width)
+{
+ std::vector<PointXYZ> pointVec;
+ pointVec.resize (640*360, PointXYZ (2, 3, 4));
+ cloud.assign (pointVec.begin(), pointVec.end(), 640);
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+}
+
+TEST_F (pointCloudTest, assign_mismatch_size_and_width_height)
+{
+ std::vector<PointXYZ> pointVec;
+ pointVec.resize (640*480, PointXYZ (7, 7, 7));
+ cloud.assign (pointVec.begin(), pointVec.end(), 460);
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640*480);
+}
+
+TEST_F (pointCloudTest, assign_initializer_list)
+{
+ cloud.assign ({PointXYZ (3, 4, 5), PointXYZ (3, 4, 5), PointXYZ (3, 4, 5)});
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 3);
+}
+
+TEST_F (pointCloudTest, assign_initializer_list_with_new_width)
+{
+ cloud.assign ({PointXYZ (3, 4, 5), PointXYZ (3, 4, 5), PointXYZ (3, 4, 5), PointXYZ (3, 4, 5)}, 2);
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 2);
+}
+
+TEST_F (pointCloudTest, assign_initializer_list_with_unorganized_cloud)
+{
+ cloud.assign ({PointXYZ (3, 4, 5), PointXYZ (3, 4, 5), PointXYZ (3, 4, 5)}, 6);
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 3);
+}
+
+TEST_F (pointCloudTest, push_back_to_unorganized_cloud)
+{
+ cloud.push_back (PointXYZ (3, 4, 5));
+ EXPECT_FALSE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 1);
+}
+
+TEST_F (pointCloudTest, push_back_to_organized_cloud)
+{
+ cloud.resize (80, 80, PointXYZ (1, 1, 1));
+ EXPECT_TRUE (cloud.isOrganized ());
+ cloud.push_back (PointXYZ (3, 4, 5));
+ EXPECT_EQ (cloud.width, (80*80) + 1);
+}
+
+/////////////////////////////////////////////////
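+// Fixture whose cloud is pre-filled as an organized 640x480 grid of identical
+// points; used to exercise the transient_* modifiers, concatenation and
+// organized access.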
+struct organizedPointCloudTest : public pointCloudTest {
+ protected:
+ void SetUp() override
+ {
+ cloud.resize (640, 480, PointXYZ (1, 1, 1));
+ }
+};
+
+TEST_F (organizedPointCloudTest, transient_push_back)
+{
+ cloud.transient_push_back (PointXYZ(2, 2, 2));
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+ EXPECT_EQ (cloud.size(), (640*480) + 1);
+}
+
+TEST_F (organizedPointCloudTest, transient_emplace_back)
+{
+ auto& new_pointXYZ = cloud.transient_emplace_back (3, 3, 3);
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+ EXPECT_EQ (cloud.size(), (640*480) + 1);
+ EXPECT_EQ (&new_pointXYZ, &cloud.back ());
+}
+
+TEST_F (organizedPointCloudTest, transient_insert_one_element)
+{
+ cloud.transient_insert (cloud.end (), PointXYZ (1, 1, 1));
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.size(), (640*480) + 1);
+ EXPECT_EQ (cloud.width, 640);
+}
+
+TEST_F (organizedPointCloudTest, transient_insert_with_n_elements)
+{
+ cloud.transient_insert (cloud.end (), 10, PointXYZ (1, 1, 1));
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.size(), (640*480) + 10);
+ EXPECT_EQ (cloud.width, 640);
+}
+
+TEST_F (organizedPointCloudTest, transient_emplace)
+{
+ cloud.transient_emplace (cloud.end (), 4, 4, 4);
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+ EXPECT_EQ (cloud.size(), (640*480) + 1);
+}
+
+TEST_F (organizedPointCloudTest, transient_erase_at_position)
+{
+ cloud.transient_erase (cloud.end () - 1);
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+ EXPECT_EQ (cloud.size(), (640*480) - 1);
+}
+
+TEST_F (organizedPointCloudTest, transient_erase_with_iterator)
+{
+ cloud.transient_erase (cloud.begin (), cloud.end ());
+ EXPECT_TRUE (cloud.isOrganized ());
+ EXPECT_EQ (cloud.width, 640);
+ EXPECT_EQ (cloud.size(), 0);
+}
+
+TEST_F (organizedPointCloudTest, unorganized_concatenate)
+{
+ PointCloud<PointXYZ> new_unorganized_cloud;
+ PointCloud<PointXYZ>::concatenate (new_unorganized_cloud, cloud);
+ EXPECT_FALSE (new_unorganized_cloud.isOrganized ());
+ EXPECT_EQ (new_unorganized_cloud.width, 640*480);
+}
+
+TEST_F (organizedPointCloudTest, unorganized_concatenate_with_argument_return)
+{
+ PointCloud<PointXYZ> new_unorganized_cloud;
+ PointCloud<PointXYZ>::concatenate (new_unorganized_cloud, cloud);
+ PointCloud<PointXYZ> unorganized_cloud_out;
+ PointCloud<PointXYZ>::concatenate (new_unorganized_cloud, cloud, unorganized_cloud_out);
+ EXPECT_FALSE (unorganized_cloud_out.isOrganized ());
+ EXPECT_EQ (unorganized_cloud_out.width, 640*480*2);
+}
+
+TEST_F (organizedPointCloudTest, unorganized_concatenate_with_assignment_return)
+{
+ PointCloud<PointXYZ> unorganized_cloud;
+ PointCloud<PointXYZ>::concatenate (unorganized_cloud, cloud);
+ PointCloud<PointXYZ> unorganized_cloud_out = cloud + unorganized_cloud;
+ EXPECT_FALSE (unorganized_cloud_out.isOrganized ());
+ EXPECT_EQ (unorganized_cloud_out.width, 640*480*2);
+}
+
+TEST_F (organizedPointCloudTest, unorganized_concatenate_with_plus_operator)
+{
+ PointCloud<PointXYZ> unorganized_cloud;
+ unorganized_cloud += cloud;
+ EXPECT_FALSE (unorganized_cloud.isOrganized ());
+ EXPECT_EQ (unorganized_cloud.width, 640*480);
+}
+
+TEST_F (organizedPointCloudTest, at_with_throw)
+{
+ PointCloud<PointXYZ> unorganized_cloud;
+ unorganized_cloud += cloud;
+ EXPECT_THROW({unorganized_cloud.at (5, 5);}, UnorganizedPointCloudException);
+}
+
+TEST_F (organizedPointCloudTest, at_no_throw)
+{
+ const auto& point_at = cloud.at (cloud.width - 1, cloud.height - 1);
+ EXPECT_EQ(&point_at, &cloud.back());
+}
+
+TEST_F (organizedPointCloudTest, organized_concatenate)
+{
+ PointCloud<PointXYZ> organized_cloud1 = cloud;
+ PointCloud<PointXYZ> organized_cloud2 = cloud;
+ EXPECT_TRUE (organized_cloud1.isOrganized ());
+ EXPECT_TRUE (organized_cloud2.isOrganized ());
+ PointCloud<PointXYZ> organized_cloud_out = organized_cloud1 + organized_cloud2;
+ std::size_t total_size = organized_cloud1.size() + organized_cloud2.size();
+ EXPECT_FALSE (organized_cloud_out.isOrganized ());
+ EXPECT_EQ(total_size, 614400);
+ EXPECT_EQ (organized_cloud_out.width, total_size);
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
cloud_template.height = 480;
for (std::uint32_t i = 0; i < size; ++i)
{
- cloud_template.points.emplace_back(3.0f * static_cast<float>(i) + 0,
+ cloud_template.emplace_back(3.0f * static_cast<float>(i) + 0,
3.0f * static_cast<float> (i) + 1,
3.0f * static_cast<float> (i) + 2);
}
TEST(PolygonMesh, concatenate_vertices)
{
+ const std::size_t size = 15;
+
PolygonMesh test, dummy;
- test.cloud.width = 10;
- test.cloud.height = 5;
+ // The algorithm works regardless of whether the clouds are organized.
+ test.cloud.width = dummy.cloud.width = size;
+ test.cloud.height = dummy.cloud.height = 1;
- const std::size_t size = 15;
for (std::size_t i = 0; i < size; ++i)
{
dummy.polygons.emplace_back();
EXPECT_EQ(2 * dummy.polygons.size(), test.polygons.size());
const auto cloud_size = test.cloud.width * test.cloud.height;
- for (std::size_t i = 0; i < dummy.polygons.size(); ++i)
- {
- EXPECT_EQ(dummy.polygons[i].vertices.size(), test.polygons[i].vertices.size());
- EXPECT_EQ(dummy.polygons[i].vertices.size(),
- test.polygons[i + dummy.polygons.size()].vertices.size());
- for (std::size_t j = 0; j < size; ++j)
- {
- EXPECT_EQ(dummy.polygons[i].vertices[j],
- test.polygons[i].vertices[j]);
- EXPECT_EQ(dummy.polygons[i].vertices[j] + cloud_size,
- test.polygons[i + dummy.polygons.size()].vertices[j]);
- }
+ for (const auto& polygon : test.polygons)
+ for (const auto& vertex : polygon.vertices)
+ EXPECT_LT(vertex, cloud_size);
+
+ pcl::Indices vertices(size);
+ for (std::size_t i = 0; i < size; ++i) {
+ vertices = dummy.polygons[i].vertices;
+ EXPECT_EQ_VECTORS(vertices, test.polygons[i].vertices);
+ for (auto& vertex : vertices) { vertex += size; }
+ EXPECT_EQ_VECTORS(vertices, test.polygons[i + size].vertices);
}
}
EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
}
+TEST (PCL, OrganizedTransform)
+{
+ const Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();
+ // test if organized point cloud is still organized after transformPointCloud
+ pcl::PointCloud<PointXYZ> cloud_a, cloud_b, cloud_c;
+ cloud_a.resize (12);
+ cloud_a.width = 4;
+ cloud_a.height = 3;
+ pcl::transformPointCloud (cloud_a, cloud_b, transform, true);
+ EXPECT_EQ (cloud_a.width , cloud_b.width );
+ EXPECT_EQ (cloud_a.height, cloud_b.height);
+ pcl::transformPointCloud (cloud_a, cloud_c, transform, false);
+ EXPECT_EQ (cloud_a.width , cloud_c.width );
+ EXPECT_EQ (cloud_a.height, cloud_c.height);
+
+ // test if organized point cloud is still organized after transformPointCloudWithNormals
+ pcl::PointCloud<PointNormal> cloud_d, cloud_e, cloud_f;
+ cloud_d.resize (10);
+ cloud_d.width = 2;
+ cloud_d.height = 5;
+ pcl::transformPointCloudWithNormals (cloud_d, cloud_e, transform, true);
+ EXPECT_EQ (cloud_d.width , cloud_e.width );
+ EXPECT_EQ (cloud_d.height, cloud_e.height);
+ pcl::transformPointCloudWithNormals (cloud_d, cloud_f, transform, false);
+ EXPECT_EQ (cloud_d.width , cloud_f.width );
+ EXPECT_EQ (cloud_d.height, cloud_f.height);
+}
+
/* ---[ */
int
main (int argc, char** argv)
TEST (PointCloud, size)
{
- EXPECT_EQ(cloud.points.size (), cloud.size ());
+ EXPECT_EQ(cloud.size (), cloud.size ());
}
TEST (PointCloud, sq_brackets_wrapper)
TEST (PointCloud, at)
{
for (std::uint32_t i = 0; i < size; ++i)
- EXPECT_EQ_VECTORS (cloud.points.at (i).getVector3fMap (),
+ EXPECT_EQ_VECTORS (cloud.at (i).getVector3fMap (),
cloud.at (i).getVector3fMap ());
}
TEST (PointCloud, front)
{
- EXPECT_EQ_VECTORS (cloud.points.front ().getVector3fMap (),
+ EXPECT_EQ_VECTORS (cloud.front ().getVector3fMap (),
cloud.front ().getVector3fMap ());
}
TEST (PointCloud, back)
{
- EXPECT_EQ_VECTORS (cloud.points.back ().getVector3fMap (),
+ EXPECT_EQ_VECTORS (cloud.back ().getVector3fMap (),
cloud.back ().getVector3fMap ());
}
TEST (PointCloud, iterators)
{
EXPECT_EQ_VECTORS (cloud.begin ()->getVector3fMap (),
- cloud.points.begin ()->getVector3fMap ());
+ cloud.begin ()->getVector3fMap ());
EXPECT_EQ_VECTORS ((--cloud.end ())->getVector3fMap (),
- (--cloud.points.end ())->getVector3fMap ());
+ (--cloud.end ())->getVector3fMap ());
PointCloud<PointXYZ>::const_iterator pit = cloud.begin ();
- PointCloud<PointXYZ>::VectorType::const_iterator pit2 = cloud.points.begin ();
+ PointCloud<PointXYZ>::VectorType::const_iterator pit2 = cloud.begin ();
for (; pit < cloud.end (); ++pit2, ++pit)
EXPECT_EQ_VECTORS (pit->getVector3fMap (), pit2->getVector3fMap ());
}
cloud.width = 10;
cloud.height = 480;
for (std::uint32_t i = 0; i < size; ++i)
- cloud.points.emplace_back(3.0f * static_cast<float>(i) + 0, 3.0f * static_cast<float> (i) + 1, 3.0f * static_cast<float> (i) + 2);
+ cloud.emplace_back(3.0f * static_cast<float>(i) + 0, 3.0f * static_cast<float> (i) + 1, 3.0f * static_cast<float> (i) + 2);
testing::InitGoogleTest (&argc, argv);
return (RUN_ALL_TESTS ());
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using CloudPtr = PointCloud<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
CloudPtr cloud_milk;
sampled_cloud.reset (new pcl::PointCloud<pcl::PointXYZ> ());
- std::vector<int> sampled_indices;
+ pcl::Indices sampled_indices;
for (float sa = 0.0f; sa < (float)cloud->size (); sa += sampling_incr)
sampled_indices.push_back (static_cast<int> (sa));
copyPointCloud (*cloud, sampled_indices, *sampled_cloud);
#include <pcl/features/intensity_gradient.h>
using namespace pcl;
-using namespace pcl::io;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, IntensityGradientEstimation)
p.z = 0.1f * powf (x, 2.0f) + 0.5f * y + 1.0f;
p.intensity = 0.1f * powf (x, 3.0f) + 0.2f * powf (y, 2.0f) + 1.0f * p.z + 20000.0f;
- cloud_xyzi.points.push_back (p);
+ cloud_xyzi.push_back (p);
}
}
cloud_xyzi.width = cloud_xyzi.size ();
search::KdTree<PointXYZ>::Ptr tree (new search::KdTree<PointXYZ> ());
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ> ());
-std::vector<int> indices;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, GRSDEstimation)
{
PointCloud<Normal> output;
cloud.height = 1;
- cloud.points.resize (cloud.height * cloud.width);
+ cloud.resize (cloud.height * cloud.width);
ne.setInputCloud (cloud.makeShared ());
ne.setRectSize (3, 3);
ne.setNormalEstimationMethod (ne.SIMPLE_3D_GRADIENT);
{
cloud.width = 640;
cloud.height = 480;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud.is_dense = true;
for (std::size_t v = 0; v < cloud.height; ++v)
{
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class DummySearch : public pcl::search::Search<PointT>
{
public:
- virtual int nearestKSearch (const PointT &point, int k, std::vector<int> &k_indices,
+ virtual int nearestKSearch (const PointT &point, int k, pcl::Indices &k_indices,
std::vector<float> &k_sqr_distances ) const
{
pcl::utils::ignore(point);
return k;
}
- virtual int radiusSearch (const PointT& point, double radius, std::vector<int>& k_indices,
+ virtual int radiusSearch (const PointT& point, double radius, pcl::Indices& k_indices,
std::vector<float>& k_sqr_distances, unsigned int max_nn = 0 ) const
{
pcl::utils::ignore(point, radius, k_indices, k_sqr_distances);
using pcl::PointCloud;
static PointCloud<PointT>::Ptr cloud (new PointCloud<PointT> ());
-static std::vector<int> indices;
+static pcl::Indices indices;
static KdTreePtr tree;
///////////////////////////////////////////////////////////////////////////////////
p.intensity = std::exp ((-powf (x - 3.0f, 2.0f) + powf (y + 2.0f, 2.0f)) / (2.0f * 25.0f)) + std::exp ((-powf (x + 5.0f, 2.0f) + powf (y - 5.0f, 2.0f))
/ (2.0f * 4.0f));
- cloud_xyzi.points.push_back (p);
+ cloud_xyzi.push_back (p);
}
}
cloud_xyzi.width = cloud_xyzi.size ();
gradient.height = 1;
gradient.width = cloud_xyzi.size ();
gradient.is_dense = true;
- gradient.points.resize (gradient.width);
+ gradient.resize (gradient.width);
for (std::size_t i = 0; i < cloud_xyzi.size (); ++i)
{
const PointXYZI &p = cloud_xyzi[i];
search::KdTree<PointXYZ>::Ptr tree (new search::KdTree<PointXYZ> ());
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ> ());
-std::vector<int> indices;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, RSDEstimation)
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
///////////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
-shotCopyPointCloud (const PointCloud<PointT> &cloud_in, const std::vector<int> &indices,
+shotCopyPointCloud (const PointCloud<PointT> &cloud_in, const pcl::Indices &indices,
PointCloud<PointT> &cloud_out)
{
pcl::copyPointCloud<PointT>(cloud_in, indices, cloud_out);
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
-std::vector<int> indices;
+pcl::Indices indices;
KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
p.intensity = std::exp (-(powf (x - 3.0f, 2.0f) + powf (y + 2.0f, 2.0f)) / (2.0f * 25.0f)) + std::exp (-(powf (x + 5.0f, 2.0f) + powf (y - 5.0f, 2.0f))
/ (2.0f * 4.0f));
- cloud_xyzi.points.push_back (p);
+ cloud_xyzi.push_back (p);
}
}
cloud_xyzi.width = cloud_xyzi.size ();
LINK_WITH pcl_gtest pcl_common pcl_filters)
PCL_ADD_TEST(filters_convolution test_convolution
- FILES test_convolution.cpp
- LINK_WITH pcl_gtest pcl_filters)
+ FILES test_convolution.cpp
+ LINK_WITH pcl_gtest pcl_filters)
+
+PCL_ADD_TEST(filters_crop_hull test_crop_hull
+ FILES test_crop_hull.cpp
+ LINK_WITH pcl_gtest pcl_filters)
if(BUILD_io)
PCL_ADD_TEST(filters_bilateral test_filters_bilateral
#include <pcl/filters/crop_box.h>
#include <pcl/filters/extract_indices.h>
-#include <pcl/common/transforms.h>
#include <pcl/common/eigen.h>
using namespace pcl;
cropBoxFilter.setMax (max_pt);
// Indices
- std::vector<int> indices;
+ pcl::Indices indices;
cropBoxFilter.filter (indices);
// Cloud
// Should contain all
EXPECT_EQ (int (indices.size ()), 7);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 3, 5, 6, 7, 8}), indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 7, 8}), indices);
EXPECT_EQ (int (cloud_out.size ()), 7);
EXPECT_EQ (int (cloud_out.width), 7);
EXPECT_EQ (int (cloud_out.height), 1);
cropBoxFilter.filter (cloud_out);
EXPECT_EQ (int (indices.size ()), 3);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 7}), indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 7}), indices);
EXPECT_EQ (int (cloud_out.size ()), 3);
removed_indices = cropBoxFilter.getRemovedIndices ();
EXPECT_EQ (int (removed_indices->size ()), 4);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({3, 5, 6, 8}), *removed_indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({3, 5, 6, 8}), *removed_indices);
// Test setNegative
cropBoxFilter.setNegative (true);
cropBoxFilter.filter (indices);
EXPECT_EQ (int (indices.size ()), 4);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({3, 5, 6, 8}), indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({3, 5, 6, 8}), indices);
cropBoxFilter.setNegative (false);
cropBoxFilter.filter (cloud_out);
removed_indices = cropBoxFilter.getRemovedIndices ();
EXPECT_EQ (int (removed_indices->size ()), 7);
- EXPECT_VECTOR_DOES_NOT_CONTAIN (std::vector<int>({0, 4}), *removed_indices);
+ EXPECT_VECTOR_DOES_NOT_CONTAIN (pcl::Indices({0, 4}), *removed_indices);
// Test setNegative
cropBoxFilter.setNegative (true);
cropBoxFilter.filter (cloud_out_negative);
cropBoxFilter.filter (indices);
EXPECT_EQ (int (indices.size ()), 7);
- EXPECT_VECTOR_DOES_NOT_CONTAIN(std::vector<int>({0, 4}), indices);
+ EXPECT_VECTOR_DOES_NOT_CONTAIN(pcl::Indices({0, 4}), indices);
cropBoxFilter.setNegative (false);
cropBoxFilter.filter (cloud_out);
cropBoxFilter.filter (cloud_out);
EXPECT_EQ (int (indices.size ()), 1);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({7}), indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({7}), indices);
EXPECT_EQ (int (cloud_out.size ()), 1);
EXPECT_EQ (int (cloud_out.width), 1);
EXPECT_EQ (int (cloud_out.height), 1);
removed_indices = cropBoxFilter.getRemovedIndices ();
EXPECT_EQ (int (removed_indices->size ()), 6);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 3, 5, 6, 8}), *removed_indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 8}), *removed_indices);
// Test setNegative
cropBoxFilter.setNegative (true);
cropBoxFilter.filter (indices);
EXPECT_EQ (int (indices.size ()), 6);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 3, 5, 6, 8}), indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 8}), indices);
cropBoxFilter.setNegative (false);
cropBoxFilter.filter (cloud_out);
cropBoxFilter.filter (cloud_out);
EXPECT_EQ (int (indices.size ()), 1);
- EXPECT_VECTOR_CONTAINS_ALL (indices, std::vector<int>({7}));
+ EXPECT_VECTOR_CONTAINS_ALL (indices, pcl::Indices({7}));
EXPECT_EQ (int (cloud_out.size ()), 1);
EXPECT_EQ (int (cloud_out.width), 1);
EXPECT_EQ (int (cloud_out.height), 1);
removed_indices = cropBoxFilter.getRemovedIndices ();
EXPECT_EQ (int (removed_indices->size ()), 6);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>( {1, 2, 3, 5, 6, 8}), *removed_indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices( {1, 2, 3, 5, 6, 8}), *removed_indices);
// Test setNegative
cropBoxFilter.setNegative (true);
cropBoxFilter.filter (indices);
EXPECT_EQ (int (indices.size ()), 6);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 3, 5, 6, 8}), indices);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 8}), indices);
cropBoxFilter.setNegative (false);
cropBoxFilter.filter (cloud_out);
removed_indices = cropBoxFilter.getRemovedIndices ();
EXPECT_EQ (int (removed_indices->size ()), 7);
- EXPECT_VECTOR_DOES_NOT_CONTAIN (std::vector<int>({0, 4}), *removed_indices);
+ EXPECT_VECTOR_DOES_NOT_CONTAIN (pcl::Indices({0, 4}), *removed_indices);
// Test setNegative
cropBoxFilter.setNegative (true);
cropBoxFilter.filter (indices);
EXPECT_EQ (int (indices.size ()), 7);
- EXPECT_VECTOR_DOES_NOT_CONTAIN (std::vector<int>({0, 4}), indices);
+ EXPECT_VECTOR_DOES_NOT_CONTAIN (pcl::Indices({0, 4}), indices);
// PCLPointCloud2 without indices
// -------------------------------------------------------------------------
cropBoxFilter2.setMax (max_pt);
// Indices
- std::vector<int> indices2;
+ pcl::Indices indices2;
cropBoxFilter2.filter (indices2);
// Cloud
// Should contain all
EXPECT_EQ (int (indices2.size ()), 7);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 3, 5, 6, 7, 8}), indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 7, 8}), indices2);
EXPECT_EQ (int (cloud_out2.width), 7);
EXPECT_EQ (int (cloud_out2.height), 1);
cropBoxFilter2.filter (cloud_out2);
EXPECT_EQ (int (indices2.size ()), 3);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 7}), indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 7}), indices2);
EXPECT_EQ (int (cloud_out2.width*cloud_out2.height), 3);
removed_indices2 = cropBoxFilter2.getRemovedIndices ();
EXPECT_EQ (int (removed_indices2->size ()), 4);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({3, 5, 6, 8}), *removed_indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({3, 5, 6, 8}), *removed_indices2);
// Test setNegative
cropBoxFilter2.setNegative (true);
cropBoxFilter2.filter (indices2);
EXPECT_EQ (int (indices2.size ()), 4);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({3, 5, 6, 8}), indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({3, 5, 6, 8}), indices2);
cropBoxFilter2.setNegative (false);
cropBoxFilter2.filter (cloud_out2);
removed_indices2 = cropBoxFilter2.getRemovedIndices ();
EXPECT_EQ (int (removed_indices2->size ()), 7);
- EXPECT_VECTOR_DOES_NOT_CONTAIN (std::vector<int>({0, 4}), *removed_indices2);
+ EXPECT_VECTOR_DOES_NOT_CONTAIN (pcl::Indices({0, 4}), *removed_indices2);
// Test setNegative
cropBoxFilter2.setNegative (true);
cropBoxFilter2.filter (cloud_out2_negative);
cropBoxFilter2.filter (indices2);
EXPECT_EQ (int (indices2.size ()), 7);
- EXPECT_VECTOR_DOES_NOT_CONTAIN (std::vector<int>({0, 4}), indices2);
+ EXPECT_VECTOR_DOES_NOT_CONTAIN (pcl::Indices({0, 4}), indices2);
cropBoxFilter2.setNegative (false);
cropBoxFilter2.filter (cloud_out2);
cropBoxFilter2.filter (cloud_out2);
EXPECT_EQ (int (indices2.size ()), 1);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({7}), indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({7}), indices2);
EXPECT_EQ (int (cloud_out2.width), 1);
EXPECT_EQ (int (cloud_out2.height), 1);
removed_indices2 = cropBoxFilter2.getRemovedIndices ();
EXPECT_EQ (int (removed_indices2->size ()), 6);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 3, 5, 6, 8}), *removed_indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 8}), *removed_indices2);
// Test setNegative
cropBoxFilter2.setNegative (true);
cropBoxFilter2.filter (indices2);
EXPECT_EQ (int (indices2.size ()), 6);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 3, 5, 6, 8}), indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 8}), indices2);
cropBoxFilter2.setNegative (false);
cropBoxFilter2.filter (cloud_out2);
cropBoxFilter2.filter (cloud_out2);
EXPECT_EQ (int (indices2.size ()), 1);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({7}), indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({7}), indices2);
EXPECT_EQ (int (cloud_out2.width), 1);
EXPECT_EQ (int (cloud_out2.height), 1);
removed_indices = cropBoxFilter.getRemovedIndices ();
EXPECT_EQ (int (removed_indices2->size ()), 6);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>( {1, 2, 3, 5, 6, 8}), *removed_indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 8}), *removed_indices2);
// Test setNegative
cropBoxFilter2.setNegative (true);
cropBoxFilter2.filter (indices2);
EXPECT_EQ (int (indices2.size ()), 6);
- EXPECT_VECTOR_CONTAINS_ALL (std::vector<int>({1, 2, 3, 5, 6, 8}), indices2);
+ EXPECT_VECTOR_CONTAINS_ALL (pcl::Indices({1, 2, 3, 5, 6, 8}), indices2);
cropBoxFilter2.setNegative (false);
cropBoxFilter2.filter (cloud_out2);
removed_indices2 = cropBoxFilter2.getRemovedIndices ();
EXPECT_EQ (int (removed_indices2->size ()), 7);
- EXPECT_VECTOR_DOES_NOT_CONTAIN (std::vector<int>({0, 4}), *removed_indices2);
+ EXPECT_VECTOR_DOES_NOT_CONTAIN (pcl::Indices({0, 4}), *removed_indices2);
// Test setNegative
cropBoxFilter2.setNegative (true);
cropBoxFilter2.filter (indices2);
EXPECT_EQ (int (indices2.size ()), 7);
- EXPECT_VECTOR_DOES_NOT_CONTAIN (std::vector<int>({0, 4}), indices2);
+ EXPECT_VECTOR_DOES_NOT_CONTAIN (pcl::Indices({0, 4}), indices2);
}
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2010-2011, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ */
+
+#include <random>
+#include <algorithm>
+#include <array>
+#include <tuple>
+
+#include <pcl/test/gtest.h>
+#include <pcl/pcl_tests.h>
+
+#include <pcl/point_types.h>
+#include <pcl/common/common.h>
+#include <pcl/filters/crop_hull.h>
+
+
+namespace
+{
+
+
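+// Bundles an input cloud with the indices of the points expected inside the
+// hull, plus the derived inside/outside sub-clouds used to validate the
+// filter output.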
+struct TestData
+{
+ TestData(pcl::Indices const & insideIndices, pcl::PointCloud<pcl::PointXYZ>::ConstPtr input_cloud)
+ : input_cloud_(input_cloud),
+ inside_mask_(input_cloud_->size(), false),
+ inside_indices_(insideIndices),
+ inside_cloud_(new pcl::PointCloud<pcl::PointXYZ>),
+ outside_cloud_(new pcl::PointCloud<pcl::PointXYZ>)
+ {
+ pcl::copyPointCloud(*input_cloud_, inside_indices_, *inside_cloud_);
+ for (pcl::index_t idx : inside_indices_) {
+ inside_mask_[idx] = true;
+ }
+ for (size_t i = 0; i < input_cloud_->size(); ++i) {
+ if (!inside_mask_[i]) {
+ outside_indices_.push_back(i);
+ }
+ }
+ pcl::copyPointCloud(*input_cloud_, outside_indices_, *outside_cloud_);
+ }
+
+ pcl::PointCloud<pcl::PointXYZ>::ConstPtr input_cloud_;
+ std::vector<bool> inside_mask_;
+ pcl::Indices inside_indices_, outside_indices_;
+ pcl::PointCloud<pcl::PointXYZ>::Ptr inside_cloud_, outside_cloud_;
+};
+
+
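+// Generates three 1000-point test clouds: one containing only inside points,
+// one containing only outside points, and one alternating between the two,
+// each paired with the indices expected to survive cropping.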
+std::vector<TestData>
+createTestDataSuite(
+ std::function<pcl::PointXYZ()> inside_point_generator,
+ std::function<pcl::PointXYZ()> outside_point_generator)
+{
+ std::vector<TestData> test_data_suite;
+ size_t const chunk_size = 1000;
+ pcl::PointCloud<pcl::PointXYZ>::Ptr inside_cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PointCloud<pcl::PointXYZ>::Ptr outside_cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PointCloud<pcl::PointXYZ>::Ptr mixed_cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::Indices inside_indices_for_inside_cloud;
+ pcl::Indices inside_indices_for_outside_cloud; // stays empty, because outside_cloud does not contain any inside points
+ pcl::Indices inside_indices_for_mixed_cloud;
+ for (size_t i = 0; i < chunk_size; ++i)
+ {
+ inside_indices_for_inside_cloud.push_back(i);
+ inside_cloud->push_back(inside_point_generator());
+ outside_cloud->push_back(outside_point_generator());
+ if (i % 2) {
+ inside_indices_for_mixed_cloud.push_back(i);
+ mixed_cloud->push_back(inside_point_generator());
+ }
+ else {
+ mixed_cloud->push_back(outside_point_generator());
+ }
+ }
+ test_data_suite.emplace_back(std::move(inside_indices_for_inside_cloud), inside_cloud);
+ test_data_suite.emplace_back(std::move(inside_indices_for_outside_cloud), outside_cloud);
+ test_data_suite.emplace_back(std::move(inside_indices_for_mixed_cloud), mixed_cloud);
+ return test_data_suite;
+}
+
+
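+// Typed fixture: for every base offset the hull cloud is translated by that
+// offset, a CropHull filter is built for it, and a test data suite is
+// generated from points sampled inside the shifted cube or pushed just
+// outside its faces.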
+template <class TupleType>
+class PCLCropHullTestFixture : public ::testing::Test
+{
+ public:
+ using CropHullTestTraits = typename std::tuple_element<0, TupleType>::type;
+ using RandomGeneratorType = typename std::tuple_element<1, TupleType>::type;
+
+ PCLCropHullTestFixture()
+ {
+ baseOffsetList_.emplace_back(0, 0, 0);
+ baseOffsetList_.emplace_back(5, 1, 10);
+ baseOffsetList_.emplace_back(1, 5, 10);
+ baseOffsetList_.emplace_back(1, 10, 5);
+ baseOffsetList_.emplace_back(10, 1, 5);
+ baseOffsetList_.emplace_back(10, 5, 1);
+ }
+ protected:
+
+ void
+ SetUp () override
+ {
+ data_.clear();
+ pcl::PointCloud<pcl::PointXYZ>::Ptr input_cloud (new pcl::PointCloud<pcl::PointXYZ>);
+ for (pcl::PointXYZ const & baseOffset : baseOffsetList_)
+ {
+ pcl::copyPointCloud(*CropHullTestTraits::getHullCloud(), *input_cloud);
+ for (pcl::PointXYZ & p : *input_cloud) {
+ p.getVector3fMap() += baseOffset.getVector3fMap();
+ }
+ auto inside_point_generator = [this, &baseOffset] () {
+ pcl::PointXYZ p(rg_(), rg_(), rg_());
+ p.getVector3fMap() += baseOffset.getVector3fMap();
+ return p;
+ };
+ auto outside_point_generator = [this, &baseOffset] () {
+ std::array<float, 3> pt;
+ std::generate(pt.begin(), pt.end(),
+ [this] {
+ float v = rg_();
+ return v + std::copysign(0.51, v);
+ });
+ pcl::PointXYZ p(pt[0], pt[1], pt[2]);
+ p.getVector3fMap() += baseOffset.getVector3fMap();
+ return p;
+ };
+ pcl::CropHull<pcl::PointXYZ> crop_hull_filter = createDefaultCropHull(input_cloud);
+ std::vector<TestData> test_data_suite = createTestDataSuite(inside_point_generator, outside_point_generator);
+ data_.emplace_back(crop_hull_filter, test_data_suite);
+ }
+ }
+
+ std::vector<std::pair<pcl::CropHull<pcl::PointXYZ>, std::vector<TestData>>> data_;
+
+ private:
+ pcl::CropHull<pcl::PointXYZ>
+ createDefaultCropHull (pcl::PointCloud<pcl::PointXYZ>::ConstPtr input_cloud) const
+ {
+ //pcl::CropHull<pcl::PointXYZ> crop_hull_filter(true);
+ pcl::CropHull<pcl::PointXYZ> crop_hull_filter;
+ crop_hull_filter.setHullCloud(input_cloud->makeShared());
+ crop_hull_filter.setHullIndices(CropHullTestTraits::getHullPolygons());
+ crop_hull_filter.setDim(CropHullTestTraits::getDim());
+ return crop_hull_filter;
+ }
+
+ RandomGeneratorType rg_;
+ pcl::PointCloud<pcl::PointXYZ> baseOffsetList_;
+};
+
+
+struct CropHullTestTraits2d
+{
+ static pcl::PointCloud<pcl::PointXYZ>::ConstPtr
+ getHullCloud();
+
+ static std::vector<pcl::Vertices>
+ getHullPolygons();
+
+ static int
+ getDim();
+};
+
+
+struct CropHullTestTraits3d
+{
+ static pcl::PointCloud<pcl::PointXYZ>::ConstPtr
+ getHullCloud();
+
+ static std::vector<pcl::Vertices>
+ getHullPolygons();
+
+ static int
+ getDim();
+};
+
+
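+// Deterministic generator of floats uniformly distributed in [-0.5, 0.5);
+// the seed is a template parameter so every entry of the typed test lists
+// gets its own reproducible sequence.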
+template <size_t seed> struct RandomGenerator
+{
+ public:
+ RandomGenerator()
+ : gen_(seed), rd_(-0.5f, 0.5f)
+ {}
+
+ float operator()() {
+ return rd_(gen_);
+ }
+
+ private:
+ std::mt19937 gen_;
+ std::uniform_real_distribution<float> rd_;
+};
+
+
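+// Twelve triangles (two per face) tessellating the surface of the cube
+// spanned by the eight hull vertices; reused as the hull polygons by both
+// the 2d and the 3d traits.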
+static std::vector<std::vector<pcl::index_t>> cube_elements = {
+ {0, 2, 1}, // l
+ {1, 2, 3}, // l
+ {3, 2, 6}, // f
+ {6, 2, 4}, // bt
+ {4, 2, 0}, // bt
+ {3, 7, 1}, // t
+ {1, 7, 5}, // t
+ {5, 7, 4}, // r
+ {4, 7, 6}, // r
+ {6, 7, 3}, // f
+ {5, 1, 4}, // back
+ {4, 1, 0} // back
+};
+
+
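+// 2d variant: the eight hull vertices form a unit square extruded only 0.1
+// along -z, and the hull is evaluated with dim = 2.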
+pcl::PointCloud<pcl::PointXYZ>::ConstPtr
+CropHullTestTraits2d::getHullCloud ()
+{
+ static pcl::PointCloud<pcl::PointXYZ>::Ptr input_cloud (new pcl::PointCloud<pcl::PointXYZ>);
+ if (input_cloud->empty()) {
+ for (const float i: {-0.5f, 0.5f})
+ for (const float j: {-0.5f, 0.5f})
+ for (const float k: {0.f, -0.1f})
+ input_cloud->emplace_back(i, j, k);
+ }
+ return input_cloud;
+}
+
+std::vector<pcl::Vertices>
+CropHullTestTraits2d::getHullPolygons ()
+{
+ std::vector<pcl::Vertices> polygons(12);
+ for (size_t i = 0; i < 12; ++i) {
+ polygons[i].vertices = cube_elements[i];
+ }
+ return polygons;
+}
+
+int
+CropHullTestTraits2d::getDim ()
+{
+ return 2;
+}
+
+
+pcl::PointCloud<pcl::PointXYZ>::ConstPtr
+CropHullTestTraits3d::getHullCloud ()
+{
+ static pcl::PointCloud<pcl::PointXYZ>::Ptr input_cloud (new pcl::PointCloud<pcl::PointXYZ>);
+ if (input_cloud->empty()) {
+ for (const float i: {-0.5f, 0.5f})
+ for (const float j: {-0.5f, 0.5f})
+ for (const float k: {-0.5f, 0.5f})
+ input_cloud->emplace_back(i, j, k);
+ }
+ return input_cloud;
+}
+
+std::vector<pcl::Vertices>
+CropHullTestTraits3d::getHullPolygons ()
+{
+ std::vector<pcl::Vertices> polygons(12);
+ for (size_t i = 0; i < 12; ++i) {
+ polygons[i].vertices = cube_elements[i];
+ }
+ return polygons;
+}
+
+int
+CropHullTestTraits3d::getDim ()
+{
+ return 3;
+}
+
+} // end of anonymous namespace
+using CropHullTestTraits2dList = std::tuple<
+ std::tuple<CropHullTestTraits2d, RandomGenerator<0>>,
+ std::tuple<CropHullTestTraits2d, RandomGenerator<123>>,
+ std::tuple<CropHullTestTraits2d, RandomGenerator<456>>
+ >;
+using CropHullTestTraits3dList = std::tuple<
+ std::tuple<CropHullTestTraits3d, RandomGenerator<0>>,
+ std::tuple<CropHullTestTraits3d, RandomGenerator<123>>,
+ std::tuple<CropHullTestTraits3d, RandomGenerator<456>>
+ >;
+using CropHullTestTypes = ::testing::Types<
+ std::tuple_element<0, CropHullTestTraits2dList>::type,
+ std::tuple_element<1, CropHullTestTraits2dList>::type,
+ std::tuple_element<2, CropHullTestTraits2dList>::type,
+ std::tuple_element<0, CropHullTestTraits3dList>::type,
+ std::tuple_element<1, CropHullTestTraits3dList>::type,
+ std::tuple_element<2, CropHullTestTraits3dList>::type
+ >;
+TYPED_TEST_SUITE(PCLCropHullTestFixture, CropHullTestTypes);
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// since the test input cloud has the same distribution in every dimension, this test also checks the problem from issue #3960 //
+TYPED_TEST (PCLCropHullTestFixture, simple_test)
+{
+ for (auto & entry : this->data_)
+ {
+ auto & crop_hull_filter = entry.first;
+ for (TestData const & test_data : entry.second)
+ {
+ crop_hull_filter.setInputCloud(test_data.input_cloud_);
+ pcl::Indices filtered_indices;
+ crop_hull_filter.filter(filtered_indices);
+ pcl::test::EXPECT_EQ_VECTORS(test_data.inside_indices_, filtered_indices);
+ }
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TYPED_TEST (PCLCropHullTestFixture, test_cloud_filtering)
+{
+ for (auto & entry : this->data_)
+ {
+ auto & crop_hull_filter = entry.first;
+ for (TestData const & test_data : entry.second)
+ {
+ crop_hull_filter.setInputCloud(test_data.input_cloud_);
+ pcl::PointCloud<pcl::PointXYZ> filteredCloud;
+ crop_hull_filter.filter(filteredCloud);
+ ASSERT_EQ (test_data.inside_cloud_->size(), filteredCloud.size());
+ pcl::index_t cloud_size = test_data.inside_cloud_->size();
+ for (pcl::index_t i = 0; i < cloud_size; ++i)
+ {
+ EXPECT_XYZ_NEAR(test_data.inside_cloud_->at(i), filteredCloud[i], 1e-5);
+ }
+ }
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// this test will pass only for the 2d case //
+template <class T>
+struct PCLCropHullTestFixture2dCrutch : PCLCropHullTestFixture<T>
+{};
+using CropHullTestTraits2dTypes = ::testing::Types<
+ std::tuple_element<0, CropHullTestTraits2dList>::type,
+ std::tuple_element<1, CropHullTestTraits2dList>::type,
+ std::tuple_element<2, CropHullTestTraits2dList>::type
+ >;
+TYPED_TEST_SUITE(PCLCropHullTestFixture2dCrutch, CropHullTestTraits2dTypes);
+
+TYPED_TEST (PCLCropHullTestFixture2dCrutch, test_crop_inside)
+{
+ for (auto & entry : this->data_)
+ {
+ auto & crop_hull_filter = entry.first;
+ for (TestData const & test_data : entry.second)
+ {
+ crop_hull_filter.setInputCloud(test_data.input_cloud_);
+ crop_hull_filter.setCropOutside(false);
+ pcl::Indices filtered_indices;
+ crop_hull_filter.filter(filtered_indices);
+ pcl::test::EXPECT_EQ_VECTORS(test_data.outside_indices_, filtered_indices);
+ }
+ }
+}
+
+
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ // Testing
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
PCLPointCloud2::Ptr cloud_blob (new PCLPointCloud2);
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
-std::vector<int> indices_;
PointCloud<PointXYZRGB>::Ptr cloud_organized (new PointCloud<PointXYZRGB>);
eie.filter (result);
EXPECT_EQ (result.size (), 0);
- empty.points.resize (10);
+ empty.resize (10);
empty.width = 10; empty.height = 1;
eie.setInputCloud (empty.makeShared ());
for (int i = 0; i < 10; ++i)
/*
PointCloud<PointXYZ> sc, scf;
- sc.points.resize (5); sc.width = 5; sc.height = 1; sc.is_dense = true;
+ sc.resize (5); sc.width = 5; sc.height = 1; sc.is_dense = true;
for (int i = 0; i < 5; i++)
{
sc[i].x = sc[i].z = 0;
pt.r = col_r[i];
pt.g = col_g[i];
pt.b = col_b[i];
- cloud_rgb_.points.push_back (pt);
+ cloud_rgb_.push_back (pt);
}
toPCLPointCloud2 (cloud_rgb_, cloud_rgb_blob_);
pt.y = 0.0f;
pt.z = 0.0f;
pt.rgba = *reinterpret_cast<std::uint32_t*> (&rgba);
- cloud_rgba_.points.push_back (pt);
+ cloud_rgba_.push_back (pt);
}
toPCLPointCloud2 (cloud_rgba_, cloud_rgba_blob_);
for (unsigned xIdx = 0; xIdx < 2; ++xIdx, ++idx)
{
PointNormal& voxel = ground_truth [xIdx][yIdx][zIdx];
- PointNormal& point = output.points [idx];
+ PointNormal& point = output[idx];
// check for point equalities
EXPECT_EQ (voxel.x, point.x);
EXPECT_EQ (voxel.y, point.y);
for (unsigned xIdx = 0; xIdx < 2; ++xIdx, ++idx)
{
PointNormal& voxel = ground_truth [xIdx][yIdx][zIdx];
- PointNormal& point = output.points [idx];
+ PointNormal& point = output[idx];
// check for point equalities
EXPECT_EQ (voxel.x, point.x);
EXPECT_EQ (voxel.y, point.y);
EXPECT_NEAR (leaves[2]->getMean ()[2], 0.0508024, 1e-4);
}
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (VoxelGridMinPoints, Filters)
+{
+ // Set up input with 4 clusters: single points at (0,0,0) and (1,1,1), a 5-point cluster around (0.11,0.11,0.11) and a 6-point cluster around (0.31,0.31,0.31)
+ PointCloud<PointXYZRGB>::Ptr input(new PointCloud<PointXYZRGB>());
+
+ input->emplace_back(0.0f, 0.0f, 0.0f);
+ std::vector<float> offsets {0.001f, 0.002f, 0.003f, -0.001f, -0.002f, -0.003f};
+ for (unsigned int i=0; i<5; ++i) {
+ input->emplace_back(0.11f+offsets[i], 0.11f+offsets[i], 0.11f+offsets[i],200,0,0);
+ input->emplace_back(0.31f+offsets[i], 0.31f+offsets[i], 0.31f+offsets[i],0,127,127);
+ }
+ input->emplace_back(0.31f+offsets[5], 0.31f+offsets[5], 0.31f+offsets[5],0,127,127);
+ input->emplace_back(1.0f, 1.0f, 1.0f);
+
+ // Test the PointCloud<PointT> VoxelGrid filter method
+ PointCloud<PointXYZRGB> outputMin4;
+ VoxelGrid<PointXYZRGB> grid;
+ // Run filter on input with MinimumPoints threshold at 4
+ grid.setLeafSize (0.02f, 0.02f, 0.02f);
+ grid.setInputCloud (input);
+ grid.setMinimumPointsNumberPerVoxel(4);
+ grid.setDownsampleAllData(true);
+ grid.filter (outputMin4);
+
+ // Verify that 2 clusters (0.11 and 0.31) passed the threshold, and check their location and color
+ EXPECT_EQ (outputMin4.size (), 2);
+ // The offsets applied from the offsets vector have 1e-3 magnitude, so check within 1e-2
+ EXPECT_NEAR (outputMin4[0].x, input->at(1).x, 1e-2);
+ EXPECT_NEAR (outputMin4[0].y, input->at(1).y, 1e-2);
+ EXPECT_NEAR (outputMin4[0].z, input->at(1).z, 1e-2);
+ EXPECT_NEAR (outputMin4[0].r, input->at(1).r, 1);
+
+ EXPECT_NEAR (outputMin4[1].x, input->at(2).x, 1e-2);
+ EXPECT_NEAR (outputMin4[1].y, input->at(2).y, 1e-2);
+ EXPECT_NEAR (outputMin4[1].z, input->at(2).z, 1e-2);
+ EXPECT_NEAR (outputMin4[1].g, input->at(2).g, 1);
+ EXPECT_NEAR (outputMin4[1].b, input->at(2).b, 1);
+
+ // Run filter again on input with MinimumPoints threshold at 6
+ PointCloud<PointXYZRGB> outputMin6;
+ grid.setMinimumPointsNumberPerVoxel(6);
+ grid.setDownsampleAllData(false);
+ grid.filter (outputMin6);
+
+ // Verify that 1 cluster (0.31) passed the threshold, and check its location
+ EXPECT_EQ (outputMin6.size (), 1);
+
+ EXPECT_NEAR (outputMin6[0].x, input->at(2).x, 1e-2);
+ EXPECT_NEAR (outputMin6[0].y, input->at(2).y, 1e-2);
+ EXPECT_NEAR (outputMin6[0].z, input->at(2).z, 1e-2);
+
+ // Test the pcl::PCLPointCloud2 VoxelGrid filter method
+ PCLPointCloud2 output_pc2;
+ VoxelGrid<PCLPointCloud2> grid_pc2;
+ PCLPointCloud2::Ptr input_pc2(new PCLPointCloud2());
+
+ // Use the same input as above, converted to PCLPointCloud2
+ toPCLPointCloud2(*input, *input_pc2);
+
+ // Run filter on input with MinimumPoints threshold at 4
+ grid_pc2.setLeafSize (0.02f, 0.02f, 0.02f);
+ grid_pc2.setInputCloud (input_pc2);
+ grid_pc2.setMinimumPointsNumberPerVoxel(4);
+ grid_pc2.setDownsampleAllData(true);
+ grid_pc2.filter (output_pc2);
+
+ // Convert back to PointXYZRGB for easier data access
+ PointCloud<pcl::PointXYZRGB>::Ptr out_pc( new pcl::PointCloud<pcl::PointXYZRGB> );
+ pcl::fromPCLPointCloud2( output_pc2, *out_pc );
+
+ // Verify that 2 clusters (0.11 and 0.31) passed the threshold, and check their location and color
+ // The PCLPointCloud2 output should match the PointCloud<PointXYZRGB> output; allow a 1e-4 tolerance for floating-point rounding error
+ EXPECT_EQ (out_pc->points.size (), outputMin4.size());
+
+ EXPECT_NEAR (out_pc->at(0).x, outputMin4[0].x, 1e-4);
+ EXPECT_NEAR (out_pc->at(0).y, outputMin4[0].y, 1e-4);
+ EXPECT_NEAR (out_pc->at(0).z, outputMin4[0].z, 1e-4);
+ EXPECT_NEAR (out_pc->at(0).r, outputMin4[0].r, 1);
+
+ EXPECT_NEAR (out_pc->at(1).x, outputMin4[1].x, 1e-4);
+ EXPECT_NEAR (out_pc->at(1).y, outputMin4[1].y, 1e-4);
+ EXPECT_NEAR (out_pc->at(1).z, outputMin4[1].z, 1e-4);
+ EXPECT_NEAR (out_pc->at(1).g, outputMin4[1].g, 1);
+ EXPECT_NEAR (out_pc->at(1).b, outputMin4[1].b, 1);
+
+ // Run filter again on input with MinimumPoints threshold at 6
+ grid_pc2.setMinimumPointsNumberPerVoxel(6);
+ grid_pc2.setDownsampleAllData(false);
+ grid_pc2.filter (output_pc2);
+
+ // Convert back to PointXYZRGB for easier data access
+ pcl::fromPCLPointCloud2( output_pc2, *out_pc );
+
+ // Verify that 1 cluster (0.31) passed the threshold, and check its location
+ EXPECT_EQ (out_pc->points.size (), outputMin6.size());
+
+ EXPECT_NEAR (out_pc->at(0).x, outputMin6[0].x, 1e-4);
+ EXPECT_NEAR (out_pc->at(0).y, outputMin6[0].y, 1e-4);
+ EXPECT_NEAR (out_pc->at(0).z, outputMin6[0].z, 1e-4);
+}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (ProjectInliers, Filters)
{
proj.setModelCoefficients (coefficients);
proj.filter (output);
- for (const auto &point : output.points)
+ for (const auto &point : output)
EXPECT_NEAR (point.z, 0.0, 1e-4);
// Test the pcl::PCLPointCloud2 method
fromPCLPointCloud2 (output_blob, output);
- for (const auto &point : output.points)
+ for (const auto &point : output)
EXPECT_NEAR (point.z, 0.0, 1e-4);
}
condrem.filter (output);
int num_not_nan = 0;
- for (const auto &point : output.points)
+ for (const auto &point : output)
{
if (std::isfinite (point.x) &&
std::isfinite (point.y) &&
condrem_.filter (output);
num_not_nan = 0;
- for (const auto &point : output.points)
+ for (const auto &point : output)
{
if (std::isfinite (point.x) &&
std::isfinite (point.y) &&
EXPECT_EQ ((*cloud)[cloud->size () - 1].z, output[output.size () - 1].z);
int num_not_nan = 0;
- for (const auto &point : output.points)
+ for (const auto &point : output)
{
if (std::isfinite (point.x) &&
std::isfinite (point.y) &&
EXPECT_EQ ((*cloud)[cloud->size () - 1].z, output[output.size () - 1].z);
num_not_nan = 0;
- for (const auto &point : output.points)
+ for (const auto &point : output)
{
if (std::isfinite (point.x) &&
std::isfinite (point.y) &&
pt.x = i;
pt.y = j;
pt.z = 1;
- incloud->points.push_back (pt);
+ incloud->push_back (pt);
}
}
incloud->height = 1;
ssn_filter.filter (outcloud);
// All the sampled points should have normals along the direction of Z axis
- for (const auto &point : outcloud.points)
+ for (const auto &point : outcloud)
{
EXPECT_NEAR (point.normal[0], 0, 1e-3);
EXPECT_NEAR (point.normal[1], 0, 1e-3);
// Adding a shadow point
PointXYZ pt (.0f, .0f, .1f);
- input->points.push_back (pt);
+ input->push_back (pt);
input->height = 1;
input->width = input->size ();
pt.x = float (i);
pt.y = float (j);
pt.z = float (k);
- input->points.push_back (pt);
+ input->push_back (pt);
}
}
}
// Input without NaN
pcl::PointCloud<pcl::PointXYZRGB> cloud_organized_nonan;
- std::vector<int> dummy;
+ pcl::Indices dummy;
pcl::removeNaNFromPointCloud<pcl::PointXYZRGB> (*cloud_organized, cloud_organized_nonan, dummy);
// Viewpoint
// Search parameters
const int k = 5;
- std::vector<std::vector<int> > k_indices;
+ std::vector<pcl::Indices> k_indices;
std::vector<std::vector<float> > k_sqr_distances;
// Estimated and refined normal containers
// Search for neighbors
pcl::search::KdTree<pcl::PointXYZRGB> kdtree;
kdtree.setInputCloud (cloud_organized_nonan.makeShared ());
- kdtree.nearestKSearch (cloud_organized_nonan, std::vector<int> (), k, k_indices, k_sqr_distances);
+ kdtree.nearestKSearch (cloud_organized_nonan, pcl::Indices (), k, k_indices, k_sqr_distances);
/*
* Estimate normals
seg.segment (*inliers, *coefficients);
// Read out SAC model
- const std::vector<int>& idx_table = inliers->indices;
+ const auto& idx_table = inliers->indices;
float a = coefficients->values[0];
float b = coefficients->values[1];
float c = coefficients->values[2];
int num_nans = 0;
// Loop
- for (const int &idx : idx_table)
+ for (const auto &idx : idx_table)
{
float tmp;
loadPCDFile (file_name, *cloud_blob);
fromPCLPointCloud2 (*cloud_blob, *cloud);
- indices_.resize (cloud->size ());
- for (index_t i = 0; i < static_cast<index_t>(indices_.size ()); ++i)
- indices_[i] = i;
-
loadPCDFile (argv[2], *cloud_organized);
TEST(FunctorFilterTrait, CheckCompatibility)
{
const auto copy_all = [](PointCloud<PointXYZ>, index_t) { return 0; };
- EXPECT_TRUE((is_functor_for_filter_v<PointXYZ, decltype(copy_all)>));
+ EXPECT_TRUE((is_function_object_for_filter_v<PointXYZ, decltype(copy_all)>));
const auto ref_all = [](PointCloud<PointXYZ>&, index_t&) { return 0; };
- EXPECT_FALSE((is_functor_for_filter_v<PointXYZ, decltype(ref_all)>));
+ EXPECT_FALSE((is_function_object_for_filter_v<PointXYZ, decltype(ref_all)>));
const auto ref_cloud = [](PointCloud<PointXYZ>&, index_t) { return 0; };
- EXPECT_FALSE((is_functor_for_filter_v<PointXYZ, decltype(ref_cloud)>));
+ EXPECT_FALSE((is_function_object_for_filter_v<PointXYZ, decltype(ref_cloud)>));
const auto const_ref_cloud = [](const PointCloud<PointXYZ>&, index_t) { return 0; };
- EXPECT_TRUE((is_functor_for_filter_v<PointXYZ, decltype(const_ref_cloud)>));
+ EXPECT_TRUE((is_function_object_for_filter_v<PointXYZ, decltype(const_ref_cloud)>));
const auto const_ref_all = [](const PointCloud<PointXYZ>&, const index_t&) {
return 0;
};
- EXPECT_TRUE((is_functor_for_filter_v<PointXYZ, decltype(const_ref_all)>));
+ EXPECT_TRUE((is_function_object_for_filter_v<PointXYZ, decltype(const_ref_all)>));
}
struct FunctorFilterRandom : public testing::TestWithParam<std::uint32_t> {
};
for (const auto& keep_removed : {true, false}) {
- FunctorFilter<PointXYZ, decltype(lambda)> filter{lambda, keep_removed};
+ advanced::FunctorFilter<PointXYZ, decltype(lambda)> filter{lambda, keep_removed};
filter.setInputCloud(cloud);
const auto removed_size = filter.getRemovedIndices()->size();
} // namespace type_test
template <typename T>
-struct FunctorFilterFunctor : public ::testing::Test {
+struct FunctorFilterFunctionObject : public ::testing::Test {
void
SetUp() override
{
}
PointCloud<PointXYZ> cloud;
};
-TYPED_TEST_SUITE_P(FunctorFilterFunctor);
+TYPED_TEST_SUITE_P(FunctorFilterFunctionObject);
-TYPED_TEST_P(FunctorFilterFunctor, type_check)
+TYPED_TEST_P(FunctorFilterFunctionObject, type_check)
{
using FunctorT = TypeParam;
const auto& functor = type_test::Helper<FunctorT>::value;
- FunctorFilter<PointXYZ, FunctorT> filter_lambda{functor};
- EXPECT_EQ(filter_lambda.getFunctor()(this->cloud, 0), 0);
- EXPECT_EQ(filter_lambda.getFunctor()(this->cloud, 1), 1);
+ advanced::FunctorFilter<PointXYZ, FunctorT> filter_lambda{functor};
+ EXPECT_EQ(filter_lambda.getFunctionObject()(this->cloud, 0), 0);
+ EXPECT_EQ(filter_lambda.getFunctionObject()(this->cloud, 1), 1);
}
-REGISTER_TYPED_TEST_SUITE_P(FunctorFilterFunctor, type_check);
-INSTANTIATE_TYPED_TEST_SUITE_P(pcl, FunctorFilterFunctor, type_test::types);
+REGISTER_TYPED_TEST_SUITE_P(FunctorFilterFunctionObject, type_check);
+INSTANTIATE_TYPED_TEST_SUITE_P(pcl, FunctorFilterFunctionObject, type_test::types);
int
main(int argc, char** argv)
TEST (ModelOutlierRemoval, Model_Outlier_Filter)
{
PointCloud<PointXYZ>::Ptr cloud_filter_out (new PointCloud<PointXYZ>);
- std::vector<int> ransac_inliers;
+ pcl::Indices ransac_inliers;
float thresh = 0.01;
//run ransac
Eigen::VectorXf model_coefficients;
// Conditioning number should be loosely close to the expected number. Adopting 10% of the reference value
EXPECT_NEAR (113.29773, cond_num_walls, 10.);
- IndicesPtr walls_indices (new std::vector<int> ());
+ IndicesPtr walls_indices (new pcl::Indices ());
covariance_sampling.filter (*walls_indices);
covariance_sampling.setIndices (walls_indices);
double cond_num_walls_sampled = covariance_sampling.computeConditionNumber ();
// Conditioning number should be loosely close to the expected number. Adopting 10% of the reference value
EXPECT_NEAR (cond_num_turtle, 20661.7663, 2e4);
- IndicesPtr turtle_indices (new std::vector<int> ());
+ IndicesPtr turtle_indices (new pcl::Indices ());
covariance_sampling.filter (*turtle_indices);
covariance_sampling.setIndices (turtle_indices);
double cond_num_turtle_sampled = covariance_sampling.computeConditionNumber ();
sample.setSample (10);
// Indices
- std::vector<int> indices;
+ pcl::Indices indices;
sample.filter (indices);
EXPECT_EQ (int (indices.size ()), 10);
sample2.setSample (10);
// Indices
- std::vector<int> indices2;
+ pcl::Indices indices2;
sample2.filter (indices2);
EXPECT_EQ (int (indices2.size ()), 10);
ne.compute (*cloud_walls_normals);
copyPointCloud (*cloud_walls, *cloud_walls_normals);
- std::vector<int> aux_indices;
+ pcl::Indices aux_indices;
removeNaNFromPointCloud (*cloud_walls_normals, *cloud_walls_normals, aux_indices);
removeNaNNormalsFromPointCloud (*cloud_walls_normals, *cloud_walls_normals, aux_indices);
ASSERT_EQ(output.size(), 1000);
EXPECT_EQ(removed_indices->size(), xyz->size() - 1000);
std::set<int> removed_indices_set(removed_indices->begin(), removed_indices->end());
- ASSERT_TRUE(removed_indices_set.size() == removed_indices->size());
+ ASSERT_EQ(removed_indices_set.size(), removed_indices->size());
}
int
--- /dev/null
+#!/bin/bash -eu
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Point Cloud Library (PCL) - www.pointclouds.org
+# Copyright (c) 2020-, Open Perception
+#
+# All rights reserved
+#
+
+# This script is run inside OSS-Fuzz's Docker image
+# and builds PCL's fuzzers so they can be run continuously
+# by OSS-Fuzz.
+# In the OSS-Fuzz Docker image, PCL is located at $SRC/pcl.
+
+# The Dockerfile that builds PCL can be found here:
+# (url pending)
+
+# Build PCL
+cd pcl
+mkdir build && cd build
+pwd
+cmake -DWITH_OPENGL=FALSE \
+ -DWITH_PCAP=FALSE \
+ -DWITH_VTK=FALSE \
+ -DPCL_SHARED_LIBS:BOOL=OFF \
+ -DWITH_LIBUSB=FALSE \
+ -DWITH_QT=FALSE \
+ -DBUILD_features=OFF \
+ -DBUILD_filters=OFF \
+ -DBUILD_geometry=OFF \
+ -DBUILD_kdtree=OFF \
+ -DBUILD_keypoints=OFF \
+ -DBUILD_ml=OFF \
+ -DBUILD_outofcore=OFF \
+ -DBUILD_people=OFF \
+ -DBUILD_recognition=OFF \
+ -DBUILD_registration=OFF \
+ -DBUILD_sample_consensus=OFF \
+ -DBUILD_search=OFF \
+ -DBUILD_segmentation=OFF \
+ -DBUILD_stereo=OFF \
+ -DBUILD_surface=OFF \
+ -DBUILD_tracking=OFF \
+ -DBUILD_visualization=OFF \
+ ..
+
+make -j$(nproc)
+
+# Build fuzzers
+cd ../test/fuzz
+
+$CXX $CXXFLAGS -DPCLAPI_EXPORTS \
+ -I/src/pcl/build/include -I/src/pcl/common/include \
+ -I/src/pcl/dssdk/include \
+ -I/src/pcl/io/include -isystem /usr/include/eigen3 \
+ -O2 -g -DNDEBUG -fPIC -std=c++14 \
+ -o ply_reader_fuzzer.o -c ply_reader_fuzzer.cpp
+
+$CXX $CXXFLAGS $LIB_FUZZING_ENGINE ply_reader_fuzzer.o \
+ ../../build/lib/libpcl_io.a ../../build/lib/libpcl_io_ply.a \
+ ../../build/lib/libpcl_common.a \
+ /usr/local/lib/libboost_filesystem.a -o $OUT/ply_reader_fuzzer -lm
+
+zip $OUT/ply_reader_fuzzer_seed_corpus.zip $SRC/pcl/test/cube.ply
--- /dev/null
+#include <pcl/io/ply_io.h>
+#include <pcl/conversions.h>
+#include <pcl/PolygonMesh.h>
+#include <pcl/PCLPointCloud2.h>
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+
+#include <cstdio>   // for sprintf, fopen, fwrite, fclose
+#include <unistd.h> // for getpid, unlink
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ pcl::PCLPointCloud2 cloud_blob;
+ pcl::PLYReader reader;
+ char filename[256];
+ sprintf(filename, "/tmp/libfuzzer.%d", getpid());
+
+ FILE *fp = fopen(filename, "wb");
+ if (!fp)
+ return 0;
+ fwrite(data, size, 1, fp);
+ fclose(fp);
+
+ reader.read (filename, cloud_blob);
+ unlink(filename);
+ return 0;
+}
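// Illustrative sketch (editor's aside, not part of the patch): a stand-alone driver that
// replays a single input file against the harness above without linking libFuzzer, which
// can be handy when reproducing a crash locally. File and option names are hypothetical.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

extern "C" int LLVMFuzzerTestOneInput(const std::uint8_t* data, std::size_t size);

int main(int argc, char** argv)
{
  if (argc < 2) {
    std::fprintf(stderr, "usage: %s <input-file>\n", argv[0]);
    return 1;
  }
  std::FILE* fp = std::fopen(argv[1], "rb");
  if (!fp)
    return 1;
  // read the whole file into a buffer and hand it to the fuzz entry point
  std::fseek(fp, 0, SEEK_END);
  const long len = std::ftell(fp);
  std::fseek(fp, 0, SEEK_SET);
  std::vector<std::uint8_t> buf(len > 0 ? static_cast<std::size_t>(len) : 0u);
  const std::size_t got = buf.empty() ? 0u : std::fread(buf.data(), 1, buf.size(), fp);
  std::fclose(fp);
  return LLVMFuzzerTestOneInput(buf.data(), got);
}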
#include <type_traits>
+using pcl::index_t;
+using pcl::Indices;
+
////////////////////////////////////////////////////////////////////////////////
template <class MeshTraitsT>
}
// Faces
- std::vector <std::uint32_t> face;
+ Indices face;
face.push_back (0);
face.push_back (1);
}
pcl::PointCloud <pcl::PointXYZRGBNormal> vertices_;
- std::vector <std::vector <std::uint32_t> > non_manifold_faces_;
- std::vector <std::vector <std::uint32_t> > manifold_faces_;
+ std::vector <Indices> non_manifold_faces_;
+ std::vector <Indices> manifold_faces_;
public:
PCL_MAKE_ALIGNED_OPERATOR_NEW
using VertexIndex = typename Mesh::VertexIndex;
using VertexIndices = typename Mesh::VertexIndices;
- const std::vector <std::vector <std::uint32_t> > faces =
+ const std::vector <Indices> faces =
Mesh::IsManifold::value ? this->manifold_faces_ :
this->non_manifold_faces_;
for (std::size_t i=0; i<faces.size (); ++i)
{
vi.clear ();
- for (const unsigned int &j : faces [i])
+ for (const auto &j : faces [i])
{
vi.push_back (VertexIndex (static_cast <int> (j)));
}
}
// Check the faces
- const std::vector <std::vector <std::uint32_t> > expected_faces =
+ const std::vector <Indices> expected_faces =
Mesh::IsManifold::value ? this->manifold_faces_ :
this->non_manifold_faces_;
ASSERT_EQ (expected_faces.size (), half_edge_mesh.sizeFaces ());
- std::vector <std::uint32_t> converted_face;
+ Indices converted_face;
for (std::size_t i=0; i<half_edge_mesh.sizeFaces (); ++i)
{
VAFC circ = half_edge_mesh.getVertexAroundFaceCirculator (FaceIndex (i));
converted_face.clear ();
do
{
- converted_face.push_back (static_cast <std::uint32_t> (circ.getTargetIndex ().get ()));
+ converted_face.push_back (static_cast <index_t> (circ.getTargetIndex ().get ()));
} while (++circ != circ_end);
EXPECT_TRUE (isCircularPermutation (expected_faces [i], converted_face)) << "Face number " << i;
ASSERT_EQ (9, faces.size ());
for (std::size_t i=0; i<order_vec.size (); ++i)
{
- std::stringstream ss;
- ss << "Configuration " << i;
- SCOPED_TRACE (ss.str ());
+ SCOPED_TRACE ("Configuration " + std::to_string(i));
const std::vector <int> order = order_vec [i];
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
cloud_host->width = data.points.size();
cloud_host->height = 1;
- cloud_host->points.resize (cloud_host->width * cloud_host->height);
- std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
+ cloud_host->resize (cloud_host->width * cloud_host->height);
+ std::transform(data.points.cbegin(), data.points.cend(), cloud_host->begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
float host_octree_resolution = 25.f;
cv::Octree octree_opencv;
const static int opencv_octree_points_per_leaf = 32;
std::vector<cv::Point3f> opencv_points(data.size());
- std::transform(data.points.begin(), data.points.end(), opencv_points.begin(), DataGenerator::ConvPoint<cv::Point3f>());
+ std::transform(data.cbegin(), data.cend(), opencv_points.begin(), DataGenerator::ConvPoint<cv::Point3f>());
{
ScopeTime t("opencv-build");
const int max_answers = 500;
float dist;
- int inds;
//host buffers
- std::vector<int> indeces;
+ std::vector<int> indices;
+ pcl::Indices indices_host;
std::vector<float> pointRadiusSquaredDistance;
#ifdef HAVE_OPENCV
std::vector<cv::Point3f> opencv_results;
#endif
//reserve
- indeces.reserve(data.data_size);
+ indices.reserve(data.data_size);
+ indices_host.reserve(data.data_size);
pointRadiusSquaredDistance.reserve(data.data_size);
#ifdef HAVE_OPENCV
opencv_results.reserve(data.data_size);
{
ScopeTime up("gpu-radius-search-{host}-all");
for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], indeces, max_answers);
+ octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], indices, max_answers);
}
{
ScopeTime up("host-radius-search-all");
for(std::size_t i = 0; i < data.tests_num; ++i)
octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z),
- data.radiuses[i], indeces, pointRadiusSquaredDistance, max_answers);
+ data.radiuses[i], indices_host, pointRadiusSquaredDistance, max_answers);
}
{
{
ScopeTime up("gpu-radius-search-{host}-all");
for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_device.radiusSearchHost(data.queries[i], data.shared_radius, indeces, max_answers);
+ octree_device.radiusSearchHost(data.queries[i], data.shared_radius, indices, max_answers);
}
{
ScopeTime up("host-radius-search-all");
for(std::size_t i = 0; i < data.tests_num; ++i)
octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z),
- data.radiuses[i], indeces, pointRadiusSquaredDistance, max_answers);
+ data.radiuses[i], indices_host, pointRadiusSquaredDistance, max_answers);
}
{
std::cout << "====== Approx nearest search =====" << std::endl;
{
- ScopeTime up("gpu-approx-nearest-batch-all");
- octree_device.approxNearestSearch(queries_device, result_device);
+ ScopeTime up("gpu-approx-nearest-batch-all");
+ pcl::gpu::Octree::ResultSqrDists sqr_distance;
+ octree_device.approxNearestSearch(queries_device, result_device, sqr_distance);
}
{
+ int inds;
ScopeTime up("gpu-approx-nearest-search-{host}-all");
for(std::size_t i = 0; i < data.tests_num; ++i)
octree_device.approxNearestSearchHost(data.queries[i], inds, dist);
}
{
+ pcl::index_t inds;
ScopeTime up("host-approx-nearest-search-all");
for(std::size_t i = 0; i < data.tests_num; ++i)
octree_host.approxNearestSearch(data.queries[i], inds, dist);
{
ScopeTime up("host-knn-search-all");
for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_host.nearestKSearch(data.queries[i], k, indeces, pointRadiusSquaredDistance);
+ octree_host.nearestKSearch(data.queries[i], k, indices, pointRadiusSquaredDistance);
}*/
}
/*
- * Software License Agreement (BSD License)
+ * SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2020-, Open Perception
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ * All rights reserved
*/
-#if defined _MSC_VER
- #pragma warning (disable : 4996 4530)
-#endif
+#include <pcl/gpu/containers/device_array.h>
+#include <pcl/gpu/octree/octree.hpp>
+#include <pcl/octree/octree_search.h>
+#include <pcl/point_cloud.h>
+#include <pcl/common/distances.h>
#include <gtest/gtest.h>
-#include <iostream>
-#include <fstream>
#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <array> // for std::array
-#if defined _MSC_VER
- #pragma warning (disable: 4521)
-#endif
-#include <pcl/point_cloud.h>
-#include <pcl/octree/octree_search.h>
-#if defined _MSC_VER
- #pragma warning (default: 4521)
-#endif
-
-#include <pcl/gpu/octree/octree.hpp>
-#include <pcl/gpu/containers/device_array.h>
-
-#include "data_source.hpp"
-
-using namespace pcl::gpu;
-
-//TEST(PCL_OctreeGPU, DISABLED_approxNearesSearch)
TEST(PCL_OctreeGPU, approxNearesSearch)
-{
- DataGenerator data;
- data.data_size = 871000;
- data.tests_num = 10000;
- data.cube_size = 1024.f;
- data.max_radius = data.cube_size/30.f;
- data.shared_radius = data.cube_size/30.f;
- data.printParams();
-
- const float host_octree_resolution = 25.f;
-
- //generate
- data();
-
- //prepare device cloud
- pcl::gpu::Octree::PointCloud cloud_device;
- cloud_device.upload(data.points);
-
-
- //prepare host cloud
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
- cloud_host->width = data.points.size();
- cloud_host->height = 1;
- cloud_host->points.resize (cloud_host->width * cloud_host->height);
- std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
-
- //gpu build
- pcl::gpu::Octree octree_device;
- octree_device.setCloud(cloud_device);
- octree_device.build();
-
- //build host octree
- pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(host_octree_resolution);
- octree_host.setInputCloud (cloud_host);
- octree_host.addPointsFromInputCloud();
-
- //upload queries
- pcl::gpu::Octree::Queries queries_device;
- queries_device.upload(data.queries);
-
-
- //prepare output buffers on device
- pcl::gpu::NeighborIndices result_device(data.tests_num, 1);
- std::vector<int> result_host_pcl(data.tests_num);
- std::vector<int> result_host_gpu(data.tests_num);
- std::vector<float> dists_pcl(data.tests_num);
- std::vector<float> dists_gpu(data.tests_num);
-
- //search GPU shared
- octree_device.approxNearestSearch(queries_device, result_device);
-
- std::vector<int> downloaded;
- result_device.data.download(downloaded);
-
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- octree_host.approxNearestSearch(data.queries[i], result_host_pcl[i], dists_pcl[i]);
- octree_device.approxNearestSearchHost(data.queries[i], result_host_gpu[i], dists_gpu[i]);
- }
-
- ASSERT_EQ ( ( downloaded == result_host_gpu ), true );
-
- int count_gpu_better = 0;
- int count_pcl_better = 0;
- float diff_pcl_better = 0;
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- float diff = dists_pcl[i] - dists_gpu[i];
- bool gpu_better = diff > 0;
-
- ++(gpu_better ? count_gpu_better : count_pcl_better);
-
- if (!gpu_better)
- diff_pcl_better +=std::abs(diff);
- }
-
- diff_pcl_better /=count_pcl_better;
-
- std::cout << "count_gpu_better: " << count_gpu_better << std::endl;
- std::cout << "count_pcl_better: " << count_pcl_better << std::endl;
- std::cout << "avg_diff_pcl_better: " << diff_pcl_better << std::endl;
+{
+ /*
+ the test points create an octree with bounds (-1, -1, -1) and (1, 1, 1).
+ point 'q' represents a query point
+ ------------------------------------
+ | | |
+ | | |
+ | | |
+ | | |
+ | | |
+ |----------------------------------|
+ | x | q | |
+ | | | |
+ |-------|--------| |
+ | | y | |
+ | | | |
+ ------------------------------------
+ the final two points are positioned such that point 'x' is farther from query point 'q'
+ than 'y', but the voxel containing 'x' is closer to 'q' than the voxel containing 'y'
+ (a worked check of these distances follows after this test)
+ */
+
+ const std::array<pcl::PointXYZ, 10> coords{
+ pcl::PointXYZ{-1.f, -1.f, -1.f},
+ pcl::PointXYZ{-1.f, -1.f, 1.f},
+ pcl::PointXYZ{-1.f, 1.f, -1.f},
+ pcl::PointXYZ{-1.f, 1.f, 1.f},
+ pcl::PointXYZ{1.f, -1.f, -1.f},
+ pcl::PointXYZ{1.f, -1.f, 1.f},
+ pcl::PointXYZ{1.f, 1.f, -1.f},
+ pcl::PointXYZ{1.f, 1.f, 1.f},
+ pcl::PointXYZ{-0.9f, -0.2f, -0.75f},
+ pcl::PointXYZ{-0.4f, -0.6f, -0.75f},
+ };
+
+ // While the GPU implementation has a fixed depth of 10 levels, octree depth in the
+ // CPU implementation can vary based on the leaf size set by the user, which can
+ // affect the results. Therefore the two implementations only agree when their depths match.
+ // generate custom point cloud
+ constexpr pcl::index_t point_size = 1000 * coords.size();
+ auto cloud = pcl::make_shared<pcl::PointCloud<pcl::PointXYZ>>(point_size, 1);
+
+ // copy chunks of 10 points at a time
+ for (auto it = cloud->begin(); it != cloud->cend(); it += coords.size())
+ std::copy(coords.cbegin(), coords.cend(), it);
+
+ const std::vector<pcl::PointXYZ> queries = {
+ {-0.4, -0.2, -0.75}, // should be different across CPU and GPU if different
+ // traversal methods are used
+ {-0.6, -0.2, -0.75}, // should be same across CPU and GPU
+ {1.1, 1.1, 1.1}, // out of range query
+ };
+
+ // prepare device cloud
+ pcl::gpu::Octree::PointCloud cloud_device;
+ cloud_device.upload(cloud->points);
+
+ // gpu build
+ pcl::gpu::Octree octree_device;
+ octree_device.setCloud(cloud_device);
+ octree_device.build();
+
+ // build host octree
+ constexpr float host_octree_resolution = 0.05;
+ pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(
+ host_octree_resolution);
+ octree_host.setInputCloud(cloud);
+ octree_host.addPointsFromInputCloud();
+
+ // upload queries
+ pcl::gpu::Octree::Queries queries_device;
+ queries_device.upload(queries);
+
+ // prepare output buffers on device
+ pcl::gpu::NeighborIndices result_device(queries.size(), 1);
+ pcl::Indices result_host_pcl(queries.size());
+ std::vector<int> result_host_gpu(queries.size());
+ std::vector<float> dists_pcl(queries.size());
+ std::vector<float> dists_gpu(queries.size());
+ pcl::gpu::Octree::ResultSqrDists dists_device;
+
+ // search GPU shared
+ octree_device.approxNearestSearch(queries_device, result_device, dists_device);
+ std::vector<int> downloaded;
+ std::vector<float> dists_device_downloaded;
+ result_device.data.download(downloaded);
+ dists_device.download(dists_device_downloaded);
+
+ for (size_t i = 0; i < queries.size(); ++i) {
+ octree_host.approxNearestSearch(queries[i], result_host_pcl[i], dists_pcl[i]);
+ octree_device.approxNearestSearchHost(queries[i], result_host_gpu[i], dists_gpu[i]);
+ }
+
+ ASSERT_EQ(downloaded, result_host_gpu);
+
+ const std::array<float, 3> expected_sqr_dists {pcl::squaredEuclideanDistance(coords[8], queries[0]),
+ pcl::squaredEuclideanDistance(coords[8], queries[1]),
+ pcl::squaredEuclideanDistance(coords[7], queries[2]) };
+
+ for (size_t i = 0; i < queries.size(); ++i) {
+ ASSERT_EQ(dists_pcl[i], dists_gpu[i]);
+ ASSERT_NEAR(dists_gpu[i], dists_device_downloaded[i], 0.001);
+ ASSERT_NEAR(dists_device_downloaded[i], expected_sqr_dists[i], 0.001);
+ }
}
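// Illustrative sketch (editor's aside, not part of the patch): a worked check of the geometry
// described by the ASCII diagram at the top of the test above. It uses only the coordinates
// already defined there; the function name is hypothetical.
#include <pcl/common/distances.h>
#include <pcl/point_types.h>

inline bool approxSearchGeometryCheck ()
{
  const pcl::PointXYZ q0 {-0.4f, -0.2f, -0.75f}; // queries[0]
  const pcl::PointXYZ x  {-0.9f, -0.2f, -0.75f}; // coords[8]: squared distance to q0 is 0.25
  const pcl::PointXYZ y  {-0.4f, -0.6f, -0.75f}; // coords[9]: squared distance to q0 is 0.16
  // y is the true nearest neighbour of q0, yet the approximate search is expected to report
  // coords[8], because the voxel containing x is reached first during traversal.
  return pcl::squaredEuclideanDistance (q0, y) < pcl::squaredEuclideanDistance (q0, x);
}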
/* ---[ */
int
-main (int argc, char** argv)
+main(int argc, char** argv)
{
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
+ testing::InitGoogleTest(&argc, argv);
+ return (RUN_ALL_TESTS());
}
/* ]--- */
-
#pragma warning (disable: 4521)
#endif
+#include <pcl/pcl_tests.h> // for EXPECT_EQ_VECTORS
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/octree/octree_search.h>
cloud_host->width = data.points.size();
cloud_host->height = 1;
cloud_host->points.resize (cloud_host->width * cloud_host->height);
- std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
+ std::transform(data.points.cbegin(), data.points.cend(), cloud_host->begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
// build device octree
pcl::gpu::Octree octree_device;
//search host
std::vector<float> dists;
- std::vector<int> results_host;
+ pcl::Indices results_host;
octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z), data.radiuses[i], results_host, dists);
std::sort(results_host_gpu.begin(), results_host_gpu.end());
std::sort(results_host.begin(), results_host.end());
- ASSERT_EQ ( (results_host_gpu == results_host ), true );
- ASSERT_EQ ( (results_host_gpu == data.bfresutls[i]), true );
+ pcl::test::EXPECT_EQ_VECTORS (results_host_gpu, results_host);
+ pcl::test::EXPECT_EQ_VECTORS (results_host_gpu, data.bfresutls[i]);
sizes.push_back(results_host.size());
}
cloud_host->width = data.points.size();
cloud_host->height = 1;
cloud_host->points.resize (cloud_host->width * cloud_host->height);
- std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
+ std::transform(data.points.cbegin(), data.points.cend(), cloud_host->begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
//gpu build
pcl::gpu::Octree octree_device;
//upload queries
pcl::gpu::Octree::Queries queries_device;
queries_device.upload(data.queries);
-
- //prepare output buffers on device
- pcl::gpu::NeighborIndices result_device(data.tests_num, k);
//prepare output buffers on host
- std::vector<std::vector< int> > result_host(data.tests_num);
+ std::vector<pcl::Indices > result_host(data.tests_num);
std::vector<std::vector<float> > dists_host(data.tests_num);
for(std::size_t i = 0; i < data.tests_num; ++i)
{
result_host[i].reserve(k);
dists_host[i].reserve(k);
}
-
+
+ //prepare output buffers on device
+ pcl::gpu::NeighborIndices result_device;
+ pcl::gpu::Octree::ResultSqrDists result_sqr_distances;
+
//search GPU shared
{
pcl::ScopeTime time("1nn-gpu");
- octree_device.nearestKSearchBatch(queries_device, k, result_device);
+ octree_device.nearestKSearchBatch(queries_device, k, result_device, result_sqr_distances);
}
- std::vector<int> downloaded, downloaded_cur;
+ std::vector<int> downloaded;
result_device.data.download(downloaded);
-
+
+ std::vector<float> downloaded_sqr_dists;
+ result_sqr_distances.download(downloaded_sqr_dists);
+
{
pcl::ScopeTime time("1nn-cpu");
for(std::size_t i = 0; i < data.tests_num; ++i)
for(std::size_t i = 0; i < data.tests_num; ++i)
{
//std::cout << i << std::endl;
- std::vector<int>& results_host_cur = result_host[i];
+ const auto& results_host_cur = result_host[i];
std::vector<float>& dists_host_cur = dists_host[i];
int beg = i * k;
int end = beg + k;
- downloaded_cur.assign(downloaded.begin() + beg, downloaded.begin() + end);
-
+ const auto beg_dist2 = downloaded_sqr_dists.cbegin() + beg;
+ const auto end_dist2 = downloaded_sqr_dists.cbegin() + end;
+
+ const std::vector<int> downloaded_cur (downloaded.cbegin() + beg, downloaded.cbegin() + end);
+ const std::vector<float> downloaded_sqr_dists_cur (beg_dist2, end_dist2);
+
std::vector<PriorityPair> pairs_host;
std::vector<PriorityPair> pairs_gpu;
for(int n = 0; n < k; ++n)
PriorityPair gpu;
gpu.index = downloaded_cur[n];
-
- float dist = (data.queries[i].getVector3fMap() - data.points[gpu.index].getVector3fMap()).norm();
- gpu.dist2 = dist * dist;
+ gpu.dist2 = downloaded_sqr_dists_cur[n];
pairs_gpu.push_back(gpu);
}
PCL_ADD_TEST(io_octree_compression test_octree_compression
FILES test_octree_compression.cpp
- LINK_WITH pcl_gtest pcl_common pcl_io pcl_octree)
+ LINK_WITH pcl_gtest pcl_common pcl_io pcl_octree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/milk_color.pcd")
#include <string>
#include <thread>
#include <vector>
+#include <boost/filesystem.hpp> // for directory_iterator, extension
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace std::chrono_literals;
int fpfh_idx = pcl::getFieldIndex (blob, "fpfh");
EXPECT_EQ (fpfh_idx, 0);
float val[33];
- for (std::size_t i = 0; i < blob.fields[fpfh_idx].count; ++i)
+ for (uindex_t i = 0; i < blob.fields[fpfh_idx].count; ++i)
memcpy (&val[i], &blob.data[0 * blob.point_step + blob.fields[fpfh_idx + 0].offset + i * sizeof (float)], sizeof (float));
EXPECT_EQ (val[0], 0);
cloud_a.width = 5;
cloud_b.width = 3;
cloud_a.height = cloud_b.height = 1;
- cloud_a.points.resize (cloud_a.width * cloud_a.height);
- cloud_b.points.resize (cloud_b.width * cloud_b.height);
+ cloud_a.resize (cloud_a.width * cloud_a.height);
+ cloud_b.resize (cloud_b.width * cloud_b.height);
for (auto &point : cloud_a.points)
{
// Fill in the cloud data
cloud_a.width = cloud_b.width = 5;
cloud_a.height = cloud_b.height = 1;
- cloud_a.points.resize (cloud_a.width * cloud_a.height);
- cloud_b.points.resize (cloud_b.width * cloud_b.height);
+ cloud_a.resize (cloud_a.width * cloud_a.height);
+ cloud_b.resize (cloud_b.width * cloud_b.height);
for (auto& point: cloud_a)
{
cloud.width = 640;
cloud.height = 480;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
EXPECT_FLOAT_EQ (cloud[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
EXPECT_FLOAT_EQ (float (cloud[nr_p - 1].intensity), float (last.intensity)); // test for fromPCLPointCloud2 ()
- std::vector<int> indices (cloud.width * cloud.height / 2);
+ pcl::Indices indices (cloud.width * cloud.height / 2);
for (int i = 0; i < static_cast<int> (indices.size ()); ++i) indices[i] = i;
// Save as ASCII
try
cloud.width = 640;
cloud.height = 480;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
TEST (PCL, PCDReaderWriterASCIIColorPrecision)
{
PointCloud<PointXYZRGB> cloud;
- cloud.points.reserve (256 / 4 * 256 / 4 * 256 / 4 * 256 / 16);
+ cloud.reserve (256 / 4 * 256 / 4 * 256 / 4 * 256 / 16);
for (std::size_t r_i = 0; r_i < 256; r_i += 5)
for (std::size_t g_i = 0; g_i < 256; g_i += 5)
for (std::size_t b_i = 0; b_i < 256; b_i += 5)
cloud.width = 300;
cloud.height = 1;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud.is_dense = true;
{
PointCloud<PointXYZFPFH33> cloud;
cloud.width = 2; cloud.height = 1;
- cloud.points.resize (2);
+ cloud.resize (2);
cloud[0].x = cloud[0].y = cloud[0].z = 1;
cloud[1].x = cloud[1].y = cloud[1].z = 2;
}
savePCDFile ("v.pcd", cloud);
- cloud.points.clear ();
+ cloud.clear ();
loadPCDFile ("v.pcd", cloud);
EXPECT_EQ (cloud.width, 2);
TEST (PCL, EigenConversions)
{
PointCloud<PointXYZ> cloud;
- cloud.points.resize (5);
+ cloud.resize (5);
for (std::size_t i = 0; i < cloud.size (); ++i)
cloud[i].x = cloud[i].y = cloud[i].z = static_cast<float> (i);
// Fill in the cloud data
cloud_a.width = cloud_b.width = 3;
cloud_a.height = cloud_b.height = 1;
- cloud_a.points.resize (cloud_a.width * cloud_a.height);
- cloud_b.points.resize (cloud_b.width * cloud_b.height);
+ cloud_a.resize (cloud_a.width * cloud_a.height);
+ cloud_b.resize (cloud_b.width * cloud_b.height);
for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
PointCloud<PointXYZ> cloud, cloud2;
cloud.width = 640;
cloud.height = 480;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
PointCloud<PointXYZRGBNormal> cloud, cloud2;
cloud.width = 640;
cloud.height = 480;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
PointCloud<PointXYZRGBNormal> cloud;
cloud.width = 640;
cloud.height = 480;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
PointCloud<PointXYZ> cloud, cloud2;
cloud.width = 640;
cloud.height = 480;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
char* pcd_file;
#define MAX_POINTS 10000.0
-#define MAX_XYZ 1024.0
#define MAX_COLOR 255
-#define NUMBER_OF_TEST_RUNS 2
+#define NUMBER_OF_TEST_RUNS 3
+
+template<typename PointT> inline PointT generateRandomPoint(const float MAX_XYZ);
+
+template<> inline pcl::PointXYZRGBA generateRandomPoint(const float MAX_XYZ) {
+ return pcl::PointXYZRGBA(static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
+ static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
+ static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
+ static_cast<int> (MAX_COLOR * rand() / RAND_MAX),
+ static_cast<int> (MAX_COLOR * rand() / RAND_MAX),
+ static_cast<int> (MAX_COLOR * rand() / RAND_MAX),
+ static_cast<int> (MAX_COLOR * rand() / RAND_MAX));
+}
+
+template<> inline pcl::PointXYZ generateRandomPoint(const float MAX_XYZ) {
+ return pcl::PointXYZ(static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
+ static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
+ static_cast<float> (MAX_XYZ * rand() / RAND_MAX));
+}
+
+template<typename PointT> inline
+typename pcl::PointCloud<PointT>::Ptr generateRandomCloud(const float MAX_XYZ) {
+ // an empty point cloud hangs the decoder, so generate at least one point
+ const unsigned int point_count = 1 + (MAX_POINTS - 1) * rand() / RAND_MAX;
+ // create shared pointcloud instances
+ typename pcl::PointCloud<PointT>::Ptr cloud(new pcl::PointCloud<PointT>());
+ for (unsigned int point = 0; point < point_count; point++) {
+ cloud->push_back(generateRandomPoint<PointT>(MAX_XYZ));
+ }
+ return cloud;
+}
-TEST (PCL, OctreeDeCompressionRandomPointXYZRGBA)
+template<typename PointT>
+class OctreeDeCompressionTest : public testing::Test {};
+
+using TestTypes = ::testing::Types<pcl::PointXYZ, pcl::PointXYZRGBA>;
+TYPED_TEST_SUITE(OctreeDeCompressionTest, TestTypes);
+
+TYPED_TEST (OctreeDeCompressionTest, RandomClouds)
{
srand(static_cast<unsigned int> (time(NULL)));
-
+ for (const double MAX_XYZ : {1.0, 1024.0}) { // Small clouds, large clouds
// iterate over all pre-defined compression profiles
- for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
- compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile) {
- // instantiate point cloud compression encoder/decoder
- pcl::io::OctreePointCloudCompression<pcl::PointXYZRGBA> pointcloud_encoder((pcl::io::compression_Profiles_e) compression_profile, false);
- pcl::io::OctreePointCloudCompression<pcl::PointXYZRGBA> pointcloud_decoder;
- pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZRGBA>());
- // iterate over runs
- for (int test_idx = 0; test_idx < NUMBER_OF_TEST_RUNS; test_idx++, total_runs++)
- {
- try
+ for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
+ compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile) {
+ // instantiate point cloud compression encoder/decoder
+ pcl::io::OctreePointCloudCompression<TypeParam> pointcloud_encoder((pcl::io::compression_Profiles_e) compression_profile, false);
+ pcl::io::OctreePointCloudCompression<TypeParam> pointcloud_decoder;
+ typename pcl::PointCloud<TypeParam>::Ptr cloud_out(new pcl::PointCloud<TypeParam>());
+ // iterate over runs
+ for (int test_idx = 0; test_idx < NUMBER_OF_TEST_RUNS; test_idx++, total_runs++)
{
- int point_count = MAX_POINTS * rand() / RAND_MAX;
- if (point_count < 1)
- { // empty point cloud hangs decoder
- total_runs--;
- continue;
- }
- // create shared pointcloud instances
- pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGBA>());
- // assign input point clouds to octree
- // create random point cloud
- for (int point = 0; point < point_count; point++)
- {
- // gereate a random point
- pcl::PointXYZRGBA new_point;
- new_point.x = static_cast<float> (MAX_XYZ * rand() / RAND_MAX);
- new_point.y = static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
- new_point.z = static_cast<float> (MAX_XYZ * rand() / RAND_MAX);
- new_point.r = static_cast<int> (MAX_COLOR * rand() / RAND_MAX);
- new_point.g = static_cast<int> (MAX_COLOR * rand() / RAND_MAX);
- new_point.b = static_cast<int> (MAX_COLOR * rand() / RAND_MAX);
- new_point.a = static_cast<int> (MAX_COLOR * rand() / RAND_MAX);
- // OctreePointCloudPointVector can store all points..
- cloud->push_back(new_point);
- }
+ auto cloud = generateRandomCloud<TypeParam>(MAX_XYZ);
+ EXPECT_EQ(cloud->height, 1);
-// std::cout << "Run: " << total_runs << " compression profile:" << compression_profile << " point_count: " << point_count;
+// std::cout << "Run: " << total_runs << " compression profile:" << compression_profile << " point_count: " << point_count;
std::stringstream compressed_data;
pointcloud_encoder.encodePointCloud(cloud, compressed_data);
pointcloud_decoder.decodePointCloud(compressed_data, cloud_out);
- EXPECT_GT((int)cloud_out->width, 0) << "decoded PointCloud width <= 0";
- EXPECT_GT((int)cloud_out->height, 0) << " decoded PointCloud height <= 0 ";
- }
- catch (std::exception& e)
- {
- std::cout << e.what() << std::endl;
- }
- } // runs
- } // compression profiles
+ if (pcl::io::compressionProfiles_[compression_profile].doVoxelGridDownSampling) {
+ EXPECT_GT(cloud_out->width, 0);
+ EXPECT_LE(cloud_out->width, cloud->width) << "cloud width after encoding and decoding greater than before. Profile: " << compression_profile;
+ }
+ else {
+ EXPECT_EQ(cloud_out->width, cloud->width) << "cloud width after encoding and decoding not the same. Profile: " << compression_profile;
+ }
+ EXPECT_EQ(cloud_out->height, 1) << "cloud height after encoding and decoding should be 1 (as before). Profile: " << compression_profile;
+ } // runs
+ } // compression profiles
+ } // small clouds, large clouds
} // TEST
-TEST (PCL, OctreeDeCompressionRandomPointXYZ)
+TEST (PCL, OctreeDeCompressionRandomPointXYZRGBASameCloud)
{
+ // Generate a random cloud. Put it into the encoder several times and make
+ // sure that the decoded cloud has correct width and height each time.
+ const double MAX_XYZ = 1.0;
srand(static_cast<unsigned int> (time(NULL)));
-
// iterate over all pre-defined compression profiles
for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
- compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile)
- {
+ compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile) {
// instantiate point cloud compression encoder/decoder
- pcl::io::OctreePointCloudCompression<pcl::PointXYZ> pointcloud_encoder((pcl::io::compression_Profiles_e) compression_profile, false);
- pcl::io::OctreePointCloudCompression<pcl::PointXYZ> pointcloud_decoder;
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZ>());
- // loop over runs
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZRGBA> pointcloud_encoder((pcl::io::compression_Profiles_e) compression_profile, false);
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZRGBA> pointcloud_decoder;
+
+ auto cloud = generateRandomCloud<pcl::PointXYZRGBA>(MAX_XYZ);
+ EXPECT_EQ(cloud->height, 1);
+
+ // iterate over runs
for (int test_idx = 0; test_idx < NUMBER_OF_TEST_RUNS; test_idx++, total_runs++)
{
- int point_count = MAX_POINTS * rand() / RAND_MAX;
- // create shared pointcloud instances
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
- // assign input point clouds to octree
- // create random point cloud
- for (int point = 0; point < point_count; point++)
- {
- // generate a random point
- pcl::PointXYZ new_point(static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
- static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
- static_cast<float> (MAX_XYZ * rand() / RAND_MAX));
- cloud->push_back(new_point);
- }
-// std::cout << "Run: " << total_runs << " compression profile:" << compression_profile << " point_count: " << point_count;
+// std::cout << "Run: " << total_runs << " compression profile:" << compression_profile << " point_count: " << point_count;
std::stringstream compressed_data;
- try
- { // decodePointCloud() throws exceptions on errors
- pointcloud_encoder.encodePointCloud(cloud, compressed_data);
- pointcloud_decoder.decodePointCloud(compressed_data, cloud_out);
- EXPECT_GT((int)cloud_out->width, 0) << "decoded PointCloud width <= 0";
- EXPECT_GT((int)cloud_out->height, 0) << " decoded PointCloud height <= 0 ";
+ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZRGBA>());
+ pointcloud_encoder.encodePointCloud(cloud, compressed_data);
+ pointcloud_decoder.decodePointCloud(compressed_data, cloud_out);
+ if (pcl::io::compressionProfiles_[compression_profile].doVoxelGridDownSampling) {
+ EXPECT_GT(cloud_out->width, 0);
+ EXPECT_LE(cloud_out->width, cloud->width) << "cloud width after encoding and decoding greater than before. Profile: " << compression_profile;
}
- catch (std::exception& e)
- {
- std::cout << e.what() << std::endl;
+ else {
+ EXPECT_EQ(cloud_out->width, cloud->width) << "cloud width after encoding and decoding not the same. Profile: " << compression_profile;
}
+ EXPECT_EQ(cloud_out->height, 1) << "cloud height after encoding and decoding should be 1 (as before). Profile: " << compression_profile;
} // runs
} // compression profiles
} // TEST
// load point cloud from file, when present
if (pcd_file == NULL) return;
- int rv = pcl::io::loadPCDFile(pcd_file, *input_cloud_ptr);
- float voxel_sizes[] = { 0.1, 0.01 };
+ int rv = pcl::io::loadPCDFile(pcd_file, *input_cloud_ptr);
+ float voxel_sizes[] = { 0.1, 0.01 };
- EXPECT_EQ(rv, 0) << " loadPCDFile " << pcd_file;
- EXPECT_GT((int) input_cloud_ptr->width , 0) << "invalid point cloud width from " << pcd_file;
- EXPECT_GT((int) input_cloud_ptr->height, 0) << "invalid point cloud height from " << pcd_file;
+ EXPECT_EQ(rv, 0) << " loadPCDFile " << pcd_file;
+ EXPECT_GT(input_cloud_ptr->width , 0) << "invalid point cloud width from " << pcd_file;
+ EXPECT_GT(input_cloud_ptr->height, 0) << "invalid point cloud height from " << pcd_file;
- // iterate over compression profiles
- for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
- compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile) {
+ // iterate over compression profiles
+ for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
+ compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile) {
// instantiate point cloud compression encoder/decoder
- pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>* PointCloudEncoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>((pcl::io::compression_Profiles_e) compression_profile, false);
- pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>* PointCloudDecoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>();
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB> PointCloudEncoder((pcl::io::compression_Profiles_e) compression_profile, false);
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB> PointCloudDecoder;
// iterate over various voxel sizes
for (std::size_t i = 0; i < sizeof(voxel_sizes)/sizeof(voxel_sizes[0]); i++) {
pcl::octree::OctreePointCloud<pcl::PointXYZRGB> octree(voxel_sizes[i]);
- pcl::PointCloud<pcl::PointXYZRGB>::Ptr octree_out(new pcl::PointCloud<pcl::PointXYZRGB>());
+ pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZRGB>());
octree.setInputCloud((*input_cloud_ptr).makeShared());
octree.addPointsFromInputCloud();
std::stringstream compressedData;
- PointCloudEncoder->encodePointCloud(octree.getInputCloud(), compressedData);
- PointCloudDecoder->decodePointCloud(compressedData, octree_out);
- EXPECT_GT((int)octree_out->width, 0) << "decoded PointCloud width <= 0";
- EXPECT_GT((int)octree_out->height, 0) << " decoded PointCloud height <= 0 ";
+ PointCloudEncoder.encodePointCloud(octree.getInputCloud(), compressedData);
+ PointCloudDecoder.decodePointCloud(compressedData, cloud_out);
+ EXPECT_GT(cloud_out->width, 0) << "decoded PointCloud width <= 0";
+ EXPECT_GT(cloud_out->height, 0) << " decoded PointCloud height <= 0 ";
total_runs++;
}
- delete PointCloudDecoder;
- delete PointCloudEncoder;
}
}
pcl::PLYReader Reader;
Reader.read(PLYTest::mesh_file_ply_, cloud);
- ASSERT_EQ (cloud.empty(), false);
+ ASSERT_FALSE (cloud.empty());
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
pcl::PLYReader Reader;
Reader.read(PLYTest::mesh_file_ply_, cloud);
- ASSERT_EQ (cloud.empty(), false);
+ ASSERT_FALSE (cloud.empty());
}
/* ---[ */
remove ("test_mesh_rgba_binary.ply");
}
+
+TEST (PCL, PLYPolygonMeshUintIndices)
+{
+ std::ofstream fs;
+ fs.open ("mesh_uint_indices.ply");
+ fs <<
+ "ply\n"
+ "format ascii 1.0\n"
+ "element vertex 3\n"
+ "property float x\n"
+ "property float y\n"
+ "property float z\n"
+ "element face 1\n"
+ "property list uchar uint vertex_indices\n"
+ "end_header\n"
+ "0.0 0.0 0.0\n"
+ "1.0 0.0 0.0\n"
+ "1.0 1.1 0.0\n"
+ "3 2 0 1\n";
+ fs.close();
+ pcl::PolygonMesh mesh;
+ int const res = pcl::io::loadPLYFile("mesh_uint_indices.ply", mesh);
+ EXPECT_NE (res, -1);
+ EXPECT_EQ (mesh.cloud.width, 3);
+ EXPECT_EQ (mesh.cloud.height, 1);
+ EXPECT_EQ (mesh.polygons.size(), 1);
+ EXPECT_EQ (mesh.polygons.front().vertices.size(), 3);
+
+ remove("mesh_uint_indices.ply");
+}
+
+
/* ---[ */
int
main (int argc, char** argv)
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (auto &point : cloud.points)
{
point.normal_x = -1.0;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (auto &point : cloud.points)
{
point.r = 0;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (auto &point : cloud.points)
{
point.r = 0;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (std::size_t i = 0; i < cloud.size (); i++)
cloud[i].label = i;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (std::size_t i = 0; i < cloud.size (); i++)
cloud[i].label = i % 2;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (std::size_t i = 0; i < cloud.size (); i++)
cloud[i].label = i % 2;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (std::size_t i = 0; i < cloud.size (); i++)
cloud[i].z = 1.0 + i;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud[0].curvature = 1.0;
cloud[1].curvature = 2.0;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud[0].intensity = 10.0;
cloud[1].intensity = 23.3;
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
pcl::PCLImage image;
{
cloud.width = 2;
cloud.height = 2;
cloud.is_dense = false;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
cloud[0].curvature = 1.0;
cloud[1].curvature = 2.0;
for (float z = -0.5f; z <= 0.5f; z += resolution)
for (float y = -0.5f; y <= 0.5f; y += resolution)
for (float x = -0.5f; x <= 0.5f; x += resolution)
- cloud.points.emplace_back(x, y, z);
+ cloud.emplace_back(x, y, z);
cloud.width = cloud.size ();
cloud.height = 1;
srand (static_cast<unsigned int> (time (nullptr)));
// Randomly create a new point cloud
for (std::size_t i = 0; i < cloud_big.width * cloud_big.height; ++i)
- cloud_big.points.emplace_back(static_cast<float> (1024 * rand () / (RAND_MAX + 1.0)),
+ cloud_big.emplace_back(static_cast<float> (1024 * rand () / (RAND_MAX + 1.0)),
static_cast<float> (1024 * rand () / (RAND_MAX + 1.0)),
static_cast<float> (1024 * rand () / (RAND_MAX + 1.0)));
}
for (std::size_t i=0; i < cloud.size(); ++i)
if (euclideanDistance(cloud[i], test_point) < max_dist)
brute_force_result.insert(i);
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
std::vector<float> k_distances;
kdtree.radiusSearch (test_point, max_dist, k_indices, k_distances, 100);
//std::cout << k_indices.size()<<"=="<<brute_force_result.size()<<"?\n";
- for (const int &k_index : k_indices)
+ for (const auto &k_index : k_indices)
{
std::set<int>::iterator brute_force_result_it = brute_force_result.find (k_index);
bool ok = brute_force_result_it != brute_force_result.end ();
++counter;
}
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
EXPECT_EQ (k_indices.size (), no_of_neighbors);
// Check if all found neighbors have distance smaller than max_dist
- for (const int &k_index : k_indices)
+ for (const auto &k_index : k_indices)
{
const MyPoint& point = cloud[k_index];
bool ok = euclideanDistance (test_point, point) <= max_dist;
// Find k nearest neighbors
const int k = 10;
- std::vector<int> k_indices (k);
+ pcl::Indices k_indices (k);
std::vector<float> k_distances (k);
kdtree.nearestKSearch (p, k, k_indices, k_distances);
for (int i = 0; i < k; ++i)
KdTreeFLANN<PointXYZ> tree;
tree.setInputCloud (cloud_in);
- std::vector<std::vector<int> > nn_indices_vector;
+ std::vector<pcl::Indices > nn_indices_vector;
for (std::size_t i = 0; i < cloud_in->size (); ++i)
if (isFinite ((*cloud_in)[i]))
{
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dists;
tree.radiusSearch ((*cloud_in)[i], 0.02, nn_indices, nn_dists);
}
Eigen::MatrixXf diff_of_gauss;
- std::vector<int> nn_indices;
+ pcl::Indices nn_indices;
std::vector<float> nn_dist;
diff_of_gauss.resize (input.size (), scales.size () - 1);
tree.radiusSearch (i_point, max_radius, nn_indices, nn_dist);
// Are they all unique?
- std::set<int> unique_indices;
- for (const int &nn_index : nn_indices)
+ std::set<pcl::index_t> unique_indices;
+ for (const auto &nn_index : nn_indices)
{
unique_indices.insert (nn_index);
}
--- /dev/null
+set(SUBSYS_NAME tests_ml)
+set(SUBSYS_DESC "Point cloud library ml module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS ml)
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if(NOT build)
+ return()
+endif()
+
+PCL_ADD_TEST(ml_kmeans test_ml_kmeans FILES test_kmeans.cpp LINK_WITH pcl_gtest pcl_common pcl_ml)
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2020-, Open Perception
+ *
+ * All rights reserved
+ */
+#include <pcl/test/gtest.h>
+#include <pcl/common/random.h>
+#include <pcl/ml/kmeans.h>
+
+using namespace pcl;
+using namespace pcl::common;
+
+using Point = std::vector<float>;
+
+// Prepare random number generator in PCL
+UniformGenerator<float> engine (-100000.0, 100000.0, 2021);
+
+class SampleDataChecker
+{
+ public:
+ int data_size_;
+ int dim_;
+ int cluster_size_;
+ std::vector<Point> data_sequence_;
+ std::vector<Point> answer_centroids_;
+
+ // Create sample data
+ void
+ createDataSequence ()
+ {
+ for (int data_id = 0; data_id < data_size_; ++data_id)
+ {
+ Point data;
+ for (int dim_i = 0; dim_i < dim_; ++dim_i)
+ data.push_back (engine.run ());
+ data_sequence_.push_back (data);
+ }
+ }
+
+ void
+ testKmeans (Kmeans& k_means)
+ {
+ k_means.setClusterSize (cluster_size_);
+ k_means.setInputData (data_sequence_);
+ k_means.initialClusterPoints ();
+ k_means.computeCentroids ();
+
+ // Save the centroids; these should be the correct answer
+ answer_centroids_ = k_means.get_centroids ();
+
+ // Once centroids_ has been computed, it should not change,
+ // no matter how many times computeCentroids () is called.
+ k_means.computeCentroids ();
+ k_means.computeCentroids ();
+ }
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (ComputeCentroids, Case1)
+{
+ // Create sample data sequence
+ SampleDataChecker sdc;
+ sdc.data_size_ = 20;
+ sdc.dim_ = 21;
+ sdc.cluster_size_ = 9;
+ sdc.createDataSequence ();
+
+ // Compute centroids with K-means
+ Kmeans k_means (sdc.data_size_, sdc.dim_);
+ sdc.testKmeans (k_means);
+
+ // Check that the centroids have the expected count and are unchanged
+ EXPECT_EQ (sdc.cluster_size_, k_means.get_centroids ().size ());
+ EXPECT_EQ (sdc.answer_centroids_, k_means.get_centroids ());
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (ComputeCentroids, Case2)
+{
+ // Create sample data sequence
+ SampleDataChecker sdc;
+ sdc.data_size_ = 1;
+ sdc.dim_ = 1;
+ sdc.cluster_size_ = 1;
+ sdc.createDataSequence ();
+
+ // Compute centroids with K-means
+ Kmeans k_means (sdc.data_size_, sdc.dim_);
+ sdc.testKmeans (k_means);
+
+ // Check that the centroids have the expected count and are unchanged
+ EXPECT_EQ (sdc.cluster_size_, k_means.get_centroids ().size ());
+ EXPECT_EQ (sdc.answer_centroids_, k_means.get_centroids ());
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
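// Illustrative sketch (editor's aside, not part of the test file above): the minimal call
// sequence these tests exercise, gathered in one place. Only the members already used above
// (setClusterSize, setInputData, initialClusterPoints, computeCentroids, get_centroids) are
// assumed to exist; the free-function name is hypothetical.
#include <pcl/ml/kmeans.h>
#include <vector>

inline auto
computeCentroidsSketch (std::vector<std::vector<float>> data, unsigned int clusters)
{
  pcl::Kmeans k_means (static_cast<unsigned int> (data.size ()),
                       data.empty () ? 0u : static_cast<unsigned int> (data[0].size ()));
  k_means.setClusterSize (clusters);
  k_means.setInputData (data);       // one std::vector<float> per sample
  k_means.initialClusterPoints ();   // initial assignment of points to clusters
  k_means.computeCentroids ();       // idempotent once computed (see the tests above)
  return k_means.get_centroids ();   // one centroid per cluster
}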
// test iterator
unsigned int leaf_count = 0;
- std::vector<int> indexVector;
+ Indices indexVector;
// iterate over tree
for (auto it = octree.leaf_depth_begin(), it_end = octree.leaf_depth_end(); it != it_end; ++it)
container.getPointIndices (indexVector);
// test points against bounding box of leaf node
- std::vector<int> tmpVector;
+ Indices tmpVector;
container.getPointIndices (tmpVector);
Eigen::Vector3f min_pt, max_pt;
octree.getVoxelBounds (it, min_pt, max_pt);
- for (const int &i : tmpVector)
+ for (const auto &i : tmpVector)
{
ASSERT_GE ((*cloud)[i].x, min_pt(0));
ASSERT_GE ((*cloud)[i].y, min_pt(1));
leafVectorA.pop_back ();
bool bFound = false;
- for (const int &value : data)
+ for (const auto &value : data)
if (value == leafInt)
{
bFound = true;
leafVectorA.pop_back ();
bool bFound = false;
- for (const int &value : data)
+ for (const auto &value : data)
if (value == leafInt)
{
bFound = true;
for (std::size_t i = 0; i < cloudB->size (); i++)
{
- std::vector<int> pointIdxVec;
+ Indices pointIdxVec;
octreeB.voxelSearch ((*cloudB)[i], pointIdxVec);
bool bIdxFound = false;
- std::vector<int>::const_iterator current = pointIdxVec.begin ();
- while (current != pointIdxVec.end ())
+ auto current = pointIdxVec.cbegin ();
+ while (current != pointIdxVec.cend ())
{
- if (*current == static_cast<int> (i))
+ if (*current == static_cast<pcl::index_t> (i))
{
bIdxFound = true;
break;
octreeA.setInputCloud (cloudIn);
octreeA.addPointsFromInputCloud ();
- std::vector<int> indexVector;
+ Indices indexVector;
unsigned int leafNodeCounter = 0;
for (auto it1 = octreeA.leaf_depth_begin(), it1_end = octreeA.leaf_depth_end(); it1 != it1_end; ++it1)
for (auto bfIt = octreeA.breadth_begin(); bfIt != octreeA.breadth_end(); ++bfIt)
{
// tree depth of visited nodes must grow
- ASSERT_TRUE (bfIt.getCurrentOctreeDepth () >= lastDepth);
+ ASSERT_GE (bfIt.getCurrentOctreeDepth (), lastDepth);
lastDepth = bfIt.getCurrentOctreeDepth ();
if (bfIt.isBranchNode ())
cloudIn);
}
- std::vector<int> newPointIdxVector;
+ Indices newPointIdxVector;
// get a vector of new points, which did not exist in previous buffer
octree.getPointIndicesFromNewVoxels (newPointIdxVector);
// all point indices found should have an index of >= 1000
for (std::size_t i = 0; i < 1000; i++)
{
- ASSERT_TRUE (newPointIdxVector[i] >= 1000);
+ ASSERT_GE (newPointIdxVector[i], 1000);
}
}
OctreePointCloudSearch<PointXYZ> octree (0.1);
octree.setInputCloud (cloudIn);
- std::vector<int> k_indices;
+ Indices k_indices;
std::vector<float> k_sqr_distances;
- std::vector<int> k_indices_bruteforce;
+ Indices k_indices_bruteforce;
std::vector<float> k_sqr_distances_bruteforce;
- for (unsigned int test_id = 0; test_id < test_runs; test_id++)
- {
- // define a random search point
+ for (const std::size_t maxObjsPerLeaf: {0, 5}) {
+ if (maxObjsPerLeaf != 0)
+ octree.enableDynamicDepth (maxObjsPerLeaf);
+ for (unsigned int test_id = 0; test_id < test_runs; test_id++)
+ {
+ // define a random search point
- PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX));
+ PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX));
- unsigned int K = 1 + rand () % 10;
+ unsigned int K = 1 + rand () % 10;
- // generate point cloud
- cloudIn->width = 1000;
- cloudIn->height = 1;
- cloudIn->points.resize (cloudIn->width * cloudIn->height);
- for (std::size_t i = 0; i < 1000; i++)
- {
- (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX));
- }
+ // generate point cloud
+ cloudIn->width = 1000;
+ cloudIn->height = 1;
+ cloudIn->points.resize (cloudIn->width * cloudIn->height);
+ for (std::size_t i = 0; i < 1000; i++)
+ {
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX));
+ }
- k_indices.clear ();
- k_sqr_distances.clear ();
+ k_indices.clear ();
+ k_sqr_distances.clear ();
- k_indices_bruteforce.clear ();
- k_sqr_distances_bruteforce.clear ();
+ k_indices_bruteforce.clear ();
+ k_sqr_distances_bruteforce.clear ();
- // push all points and their distance to the search point into a priority queue - bruteforce approach.
- for (std::size_t i = 0; i < cloudIn->size (); i++)
- {
- double pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x) +
- ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) +
- ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
+ // push all points and their distance to the search point into a priority queue - bruteforce approach.
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
+ {
+ double pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x) +
+ ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) +
+ ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
- prioPointQueueEntry pointEntry ((*cloudIn)[i], pointDist, static_cast<int> (i));
+ prioPointQueueEntry pointEntry ((*cloudIn)[i], pointDist, static_cast<int> (i));
- pointCandidates.push (pointEntry);
- }
+ pointCandidates.push (pointEntry);
+ }
- // pop priority queue until we have the nearest K elements
- while (pointCandidates.size () > K)
- pointCandidates.pop ();
+ // pop priority queue until we have the nearest K elements
+ while (pointCandidates.size () > K)
+ pointCandidates.pop ();
- // copy results into vectors
- unsigned idx = static_cast<unsigned> (pointCandidates.size ());
- k_indices_bruteforce.resize (idx);
- k_sqr_distances_bruteforce.resize (idx);
- while (!pointCandidates.empty ())
- {
- --idx;
- k_indices_bruteforce [idx] = pointCandidates.top ().pointIdx_;
- k_sqr_distances_bruteforce [idx] = static_cast<float> (pointCandidates.top ().pointDistance_);
+ // copy results into vectors
+ unsigned idx = static_cast<unsigned> (pointCandidates.size ());
+ k_indices_bruteforce.resize (idx);
+ k_sqr_distances_bruteforce.resize (idx);
+ while (!pointCandidates.empty ())
+ {
+ --idx;
+ k_indices_bruteforce [idx] = pointCandidates.top ().pointIdx_;
+ k_sqr_distances_bruteforce [idx] = static_cast<float> (pointCandidates.top ().pointDistance_);
- pointCandidates.pop ();
- }
+ pointCandidates.pop ();
+ }
- // octree nearest neighbor search
- octree.deleteTree ();
- octree.addPointsFromInputCloud ();
- octree.nearestKSearch (searchPoint, static_cast<int> (K), k_indices, k_sqr_distances);
+ // octree nearest neighbor search
+ octree.deleteTree ();
+ octree.addPointsFromInputCloud ();
+ octree.nearestKSearch (searchPoint, static_cast<int> (K), k_indices, k_sqr_distances);
- ASSERT_EQ (k_indices_bruteforce.size (), k_indices.size());
+ ASSERT_EQ (k_indices_bruteforce.size (), k_indices.size());
- // compare nearest neighbor results of octree with bruteforce search
- while (!k_indices_bruteforce.empty ())
- {
- ASSERT_EQ (k_indices_bruteforce.back(), k_indices.back ());
- EXPECT_NEAR (k_sqr_distances_bruteforce.back (), k_sqr_distances.back (), 1e-4);
+ // compare nearest neighbor results of octree with bruteforce search
+ while (!k_indices_bruteforce.empty ())
+ {
+ ASSERT_EQ (k_indices_bruteforce.back(), k_indices.back ());
+ EXPECT_NEAR (k_sqr_distances_bruteforce.back (), k_sqr_distances.back (), 1e-4);
- k_indices_bruteforce.pop_back ();
- k_indices.pop_back ();
+ k_indices_bruteforce.pop_back ();
+ k_indices.pop_back ();
- k_sqr_distances_bruteforce.pop_back ();
- k_sqr_distances.pop_back ();
+ k_sqr_distances_bruteforce.pop_back ();
+ k_sqr_distances.pop_back ();
+ }
}
}
}
OctreePointCloudSearch<PointXYZ> octree (1);
octree.setInputCloud (cloudIn);
- for (unsigned int test_id = 0; test_id < test_runs; test_id++)
- {
- std::vector<int> k_indices;
-
- // generate point cloud
- cloudIn->width = 300;
- cloudIn->height = 1;
- cloudIn->points.resize (cloudIn->width * cloudIn->height);
- for (auto &point : cloudIn->points)
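+  // run the box search test with the default static octree and again with dynamic octree depth enabled (at most 5 points per leaf)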
+ for (const std::size_t maxObjsPerLeaf: {0, 5}) {
+ if (maxObjsPerLeaf != 0)
+ octree.enableDynamicDepth (maxObjsPerLeaf);
+ for (unsigned int test_id = 0; test_id < test_runs; test_id++)
{
- point = PointXYZ (static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX));
- }
-
+ Indices k_indices;
- // octree points to octree
- octree.deleteTree ();
- octree.addPointsFromInputCloud ();
+ // generate point cloud
+ cloudIn->width = 300;
+ cloudIn->height = 1;
+ cloudIn->points.resize (cloudIn->width * cloudIn->height);
+ for (auto &point : cloudIn->points)
+ {
+ point = PointXYZ (static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX));
+ }
- // define a random search area
- Eigen::Vector3f lowerBoxCorner (static_cast<float> (4.0 * rand () / RAND_MAX),
- static_cast<float> (4.0 * rand () / RAND_MAX),
- static_cast<float> (4.0 * rand () / RAND_MAX));
- Eigen::Vector3f upperBoxCorner (static_cast<float> (5.0 + 4.0 * rand () / RAND_MAX),
- static_cast<float> (5.0 + 4.0 * rand () / RAND_MAX),
- static_cast<float> (5.0 + 4.0 * rand () / RAND_MAX));
+ // octree points to octree
+ octree.deleteTree ();
+ octree.addPointsFromInputCloud ();
- octree.boxSearch (lowerBoxCorner, upperBoxCorner, k_indices);
+ // define a random search area
- // test every point in point cloud
- for (std::size_t i = 0; i < 300; i++)
- {
- bool inBox;
- bool idxInResults;
- const PointXYZ& pt = (*cloudIn)[i];
+ Eigen::Vector3f lowerBoxCorner (static_cast<float> (4.0 * rand () / RAND_MAX),
+ static_cast<float> (4.0 * rand () / RAND_MAX),
+ static_cast<float> (4.0 * rand () / RAND_MAX));
+ Eigen::Vector3f upperBoxCorner (static_cast<float> (5.0 + 4.0 * rand () / RAND_MAX),
+ static_cast<float> (5.0 + 4.0 * rand () / RAND_MAX),
+ static_cast<float> (5.0 + 4.0 * rand () / RAND_MAX));
- inBox = (pt.x >= lowerBoxCorner (0)) && (pt.x <= upperBoxCorner (0)) &&
- (pt.y >= lowerBoxCorner (1)) && (pt.y <= upperBoxCorner (1)) &&
- (pt.z >= lowerBoxCorner (2)) && (pt.z <= upperBoxCorner (2));
+ octree.boxSearch (lowerBoxCorner, upperBoxCorner, k_indices);
- idxInResults = false;
- for (std::size_t j = 0; (j < k_indices.size ()) && (!idxInResults); ++j)
+ // test every point in point cloud
+ for (std::size_t i = 0; i < cloudIn->size(); i++)
{
- if (i == static_cast<unsigned int> (k_indices[j]))
- idxInResults = true;
- }
+ bool inBox;
+ bool idxInResults;
+ const PointXYZ& pt = (*cloudIn)[i];
+
+ inBox = (pt.x >= lowerBoxCorner (0)) && (pt.x <= upperBoxCorner (0)) &&
+ (pt.y >= lowerBoxCorner (1)) && (pt.y <= upperBoxCorner (1)) &&
+ (pt.z >= lowerBoxCorner (2)) && (pt.z <= upperBoxCorner (2));
- ASSERT_EQ (idxInResults, inBox);
+ idxInResults = false;
+ for (std::size_t j = 0; (j < k_indices.size ()) && (!idxInResults); ++j)
+ {
+ if (i == static_cast<unsigned int> (k_indices[j]))
+ idxInResults = true;
+ }
+
+ ASSERT_EQ (idxInResults, inBox);
+ }
}
}
}
{
constexpr unsigned int test_runs = 100;
- unsigned int bestMatchCount = 0;
-
- // instantiate point cloud
- PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
-
- srand (static_cast<unsigned int> (time (nullptr)));
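+  // repeat the approximate nearest neighbor test with dynamic octree depth disabled (0) and enabled (5 points per leaf)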
+ for (const std::size_t maxObjsPerLeaf: {0, 5}) {
+ unsigned int bestMatchCount = 0;
- constexpr double voxelResolution = 0.1;
+ // instantiate point cloud
+ PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
- // create octree
- OctreePointCloudSearch<PointXYZ> octree (voxelResolution);
- octree.setInputCloud (cloudIn);
+ srand (static_cast<unsigned int> (time (nullptr)));
- for (unsigned int test_id = 0; test_id < test_runs; test_id++)
- {
- // define a random search point
+ constexpr double voxelResolution = 0.1;
- PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX));
+ // create octree
+ OctreePointCloudSearch<PointXYZ> octree (voxelResolution);
+ octree.setInputCloud (cloudIn);
+ if (maxObjsPerLeaf != 0)
+ octree.enableDynamicDepth (maxObjsPerLeaf);
- // generate point cloud
- cloudIn->width = 1000;
- cloudIn->height = 1;
- cloudIn->points.resize (cloudIn->width * cloudIn->height);
- for (std::size_t i = 0; i < 1000; i++)
+ for (unsigned int test_id = 0; test_id < test_runs; test_id++)
{
- (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX));
- }
+ // define a random search point
- // brute force search
- double BFdistance = std::numeric_limits<double>::max ();
- int BFindex = 0;
+ PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX));
- for (std::size_t i = 0; i < cloudIn->size (); i++)
- {
- double pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x) +
- ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) +
- ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
+ // generate point cloud
+ cloudIn->width = 1000;
+ cloudIn->height = 1;
+ cloudIn->points.resize (cloudIn->width * cloudIn->height);
+ for (std::size_t i = 0; i < 1000; i++)
+ {
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX));
+ }
+
+ // brute force search
+ double BFdistance = std::numeric_limits<double>::max ();
+ pcl::index_t BFindex = 0;
- if (pointDist < BFdistance)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
- BFindex = static_cast<int> (i);
- BFdistance = pointDist;
+ double pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x) +
+ ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) +
+ ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
+
+ if (pointDist < BFdistance)
+ {
+ BFindex = static_cast<pcl::index_t> (i);
+ BFdistance = pointDist;
+ }
+
}
- }
+        pcl::index_t ANNindex;
+ float ANNdistance;
- int ANNindex;
- float ANNdistance;
+ // octree approx. nearest neighbor search
+ octree.deleteTree ();
+ octree.addPointsFromInputCloud ();
+ octree.approxNearestSearch (searchPoint, ANNindex, ANNdistance);
- // octree approx. nearest neighbor search
- octree.deleteTree ();
- octree.addPointsFromInputCloud ();
- octree.approxNearestSearch (searchPoint, ANNindex, ANNdistance);
+ if (BFindex == ANNindex)
+ {
+ EXPECT_NEAR (ANNdistance, BFdistance, 1e-4);
+ bestMatchCount++;
+ }
- if (BFindex == ANNindex)
- {
- EXPECT_NEAR (ANNdistance, BFdistance, 1e-4);
- bestMatchCount++;
}
+ // we should have found the absolute nearest neighbor at least once
+ ASSERT_GT (bestMatchCount, 0);
}
-
- // we should have found the absolute nearest neighbor at least once
- ASSERT_TRUE (bestMatchCount > 0);
}
TEST (PCL, Octree_Pointcloud_Neighbours_Within_Radius_Search)
// instantiate point clouds
PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
- PointCloud<PointXYZ>::Ptr cloudOut (new PointCloud<PointXYZ> ());
srand (static_cast<unsigned int> (time (nullptr)));
- for (unsigned int test_id = 0; test_id < test_runs; test_id++)
- {
- // define a random search point
- PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX));
-
- cloudIn->width = 1000;
- cloudIn->height = 1;
- cloudIn->points.resize (cloudIn->width * cloudIn->height);
-
- // generate point cloud data
- for (std::size_t i = 0; i < 1000; i++)
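+  // repeat the radius search test with dynamic octree depth disabled (0) and enabled (5 points per leaf)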
+ for (const std::size_t maxObjsPerLeaf: {0, 5}) {
+ for (unsigned int test_id = 0; test_id < test_runs; test_id++)
{
- (*cloudIn)[i] = PointXYZ (static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (10.0 * rand () / RAND_MAX),
- static_cast<float> (5.0 * rand () / RAND_MAX));
- }
+ // define a random search point
+ PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX));
- OctreePointCloudSearch<PointXYZ> octree (0.001);
+ cloudIn->width = 1000;
+ cloudIn->height = 1;
+ cloudIn->points.resize (cloudIn->width * cloudIn->height);
- // build octree
- octree.setInputCloud (cloudIn);
- octree.addPointsFromInputCloud ();
+ // generate point cloud data
+ for (std::size_t i = 0; i < 1000; i++)
+ {
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (10.0 * rand () / RAND_MAX),
+ static_cast<float> (5.0 * rand () / RAND_MAX));
+ }
- double pointDist;
- double searchRadius = 5.0 * static_cast<float> (rand () / RAND_MAX);
+ OctreePointCloudSearch<PointXYZ> octree (0.001);
- // bruteforce radius search
- std::vector<int> cloudSearchBruteforce;
- for (std::size_t i = 0; i < cloudIn->size (); i++)
- {
- pointDist = sqrt (
- ((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x)
- + ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y)
- + ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
+ // build octree
+ octree.setInputCloud (cloudIn);
+ if (maxObjsPerLeaf != 0)
+ octree.enableDynamicDepth (maxObjsPerLeaf);
+ octree.addPointsFromInputCloud ();
- if (pointDist <= searchRadius)
+ double pointDist;
+      double searchRadius = 5.0 * static_cast<double> (rand ()) / RAND_MAX; // random radius in [0; 5]
+
+ // bruteforce radius search
+ std::vector<int> cloudSearchBruteforce;
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
- // add point candidates to vector list
- cloudSearchBruteforce.push_back (static_cast<int> (i));
+ pointDist = sqrt (
+ ((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x)
+ + ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y)
+ + ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
+
+ if (pointDist <= searchRadius)
+ {
+ // add point candidates to vector list
+ cloudSearchBruteforce.push_back (static_cast<int> (i));
+ }
}
- }
- std::vector<int> cloudNWRSearch;
- std::vector<float> cloudNWRRadius;
+ Indices cloudNWRSearch;
+ std::vector<float> cloudNWRRadius;
- // execute octree radius search
- octree.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius);
+ // execute octree radius search
+ octree.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius);
- ASSERT_EQ (cloudSearchBruteforce.size (), cloudNWRRadius.size ());
+ ASSERT_EQ (cloudSearchBruteforce.size (), cloudNWRRadius.size ());
- // check if result from octree radius search can be also found in bruteforce search
- std::vector<int>::const_iterator current = cloudNWRSearch.begin ();
- while (current != cloudNWRSearch.end ())
- {
- pointDist = sqrt (
- ((*cloudIn)[*current].x - searchPoint.x) * ((*cloudIn)[*current].x - searchPoint.x)
- + ((*cloudIn)[*current].y - searchPoint.y) * ((*cloudIn)[*current].y - searchPoint.y)
- + ((*cloudIn)[*current].z - searchPoint.z) * ((*cloudIn)[*current].z - searchPoint.z));
+        // check if result from octree radius search can also be found in bruteforce search
+ auto current = cloudNWRSearch.cbegin ();
+ while (current != cloudNWRSearch.cend ())
+ {
+ pointDist = sqrt (
+ ((*cloudIn)[*current].x - searchPoint.x) * ((*cloudIn)[*current].x - searchPoint.x)
+ + ((*cloudIn)[*current].y - searchPoint.y) * ((*cloudIn)[*current].y - searchPoint.y)
+ + ((*cloudIn)[*current].z - searchPoint.z) * ((*cloudIn)[*current].z - searchPoint.z));
- ASSERT_TRUE (pointDist <= searchRadius);
+ ASSERT_LE (pointDist, searchRadius);
- ++current;
- }
+ ++current;
+ }
- // check if result limitation works
- octree.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius, 5);
+ // check if result limitation works
+ octree.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius, 5);
- ASSERT_TRUE (cloudNWRRadius.size () <= 5);
+ ASSERT_LE (cloudNWRRadius.size (), 5);
+ }
}
-
}
TEST (PCL, Octree_Pointcloud_Ray_Traversal)
pcl::PointCloud<pcl::PointXYZ>::VectorType voxelsInRay, voxelsInRay2;
// Indices in ray
- std::vector<int> indicesInRay, indicesInRay2;
+ Indices indicesInRay, indicesInRay2;
srand (static_cast<unsigned int> (time (nullptr)));
const double LARGE_MAX = 1e7-5*SOME_RESOLUTION;
tree.defineBoundingBox (LARGE_MIN, LARGE_MIN, LARGE_MIN, LARGE_MAX, LARGE_MAX, LARGE_MAX);
tree.getBoundingBox (min_x, min_y, min_z, max_x, max_y, max_z);
- const unsigned int depth = tree.getTreeDepth ();
+ const auto depth = tree.getTreeDepth ();
tree.defineBoundingBox (min_x, min_y, min_z, max_x, max_y, max_z);
ASSERT_EQ (depth, tree.getTreeDepth ());
pcl::PointCloud<pcl::PointXYZ> point_cloud;
- point_cloud.points.reserve (numPts);
+ point_cloud.reserve (numPts);
point_cloud.width = static_cast<std::uint32_t> (numPts);
point_cloud.height = 1;
for (std::size_t i=0; i < numPts; i++)
- point_cloud.points.emplace_back(static_cast<float>(rand () % 10), static_cast<float>(rand () % 10), static_cast<float>(rand () % 10));
+ point_cloud.emplace_back(static_cast<float>(rand () % 10), static_cast<float>(rand () % 10), static_cast<float>(rand () % 10));
pcl::PCLPointCloud2::Ptr input_cloud (new pcl::PCLPointCloud2 ());
std::uint64_t points_added = octreeA.addPointCloud (dst_blob, false);
std::uint64_t LOD_points_added = octreeB.addPointCloud_and_genLOD (dst_blob);
- ASSERT_EQ (points_added, dst_blob->width*dst_blob->height) << "Number of points returned by addPointCloud does not match the number of poitns in the input point cloud\n";
+ ASSERT_EQ (points_added, dst_blob->width*dst_blob->height) << "Number of points returned by addPointCloud does not match the number of points in the input point cloud\n";
ASSERT_EQ (LOD_points_added, dst_blob->width*dst_blob->height) << "Number of points returned by addPointCloud_and_genLOD does not match the number of points in the input point cloud\n";
pcl::PCLPointCloud2::Ptr query_result_a (new pcl::PCLPointCloud2 ());
double sqr_norm_sum = 0;
int found_points = 0;
- std::vector<int> neigh_indices (1);
+ pcl::Indices neigh_indices (1);
std::vector<float> neigh_sqr_dists (1);
for (const auto &model : transformed_model)
{
{
if ( std::isfinite( scene_descriptors_->at (i).descriptor[0] ) )
{
- std::vector<int> neigh_indices (1);
+ pcl::Indices neigh_indices (1);
std::vector<float> neigh_sqr_dists (1);
int found_neighs = match_search.nearestKSearch (scene_descriptors_->at (i), 1, neigh_indices, neigh_sqr_dists);
if(found_neighs == 1 && neigh_sqr_dists[0] < 0.25f)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, KFPCSInitialAlignment)
{
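+  // temporarily enable verbose console output for this test; the previous level is restored at the end of the test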
+ const auto previous_verbosity_level = pcl::console::getVerbosityLevel();
+ pcl::console::setVerbosityLevel(pcl::console::L_VERBOSE);
// create shared pointers
PointCloud<PointXYZI>::Ptr cloud_source_ptr, cloud_target_ptr;
cloud_source_ptr = cloud_source.makeShared ();
EXPECT_EQ (cloud_source_aligned.size (), cloud_source.size ());
EXPECT_NEAR (angle3d, 0.f, max_angle3d);
EXPECT_NEAR (translation3d, 0.f, max_translation3d);
+ pcl::console::setVerbosityLevel(previous_verbosity_level); // reset verbosity level
}
using namespace pcl;
using namespace pcl::io;
-
+
PointCloud<PointXYZ> cloud_source, cloud_target;
-
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, NormalDistributionsTransform)
{
// Check again, for all possible caching schemes
for (int iter = 0; iter < 4; iter++)
{
- bool force_cache = (bool) iter/2;
- bool force_cache_reciprocal = (bool) iter%2;
+ bool force_cache = static_cast<bool> (iter/2);
+ bool force_cache_reciprocal = static_cast<bool> (iter%2);
pcl::search::KdTree<PointT>::Ptr tree(new pcl::search::KdTree<PointT>);
// Ensure that, when force_cache is not set, we are robust to the wrong input
if (force_cache)
FeatureT f;
f.histogram[0] = x;
f.histogram[1] = y;
- feature0.points.push_back (f);
+ feature0.push_back (f);
f.histogram[0] = x;
f.histogram[1] = y - 2.5f;
- feature1.points.push_back (f);
+ feature1.push_back (f);
f.histogram[0] = x - 2.0f;
f.histogram[1] = y + 1.5f;
- feature2.points.push_back (f);
+ feature2.push_back (f);
f.histogram[0] = x + 2.0f;
f.histogram[1] = y + 1.5f;
- feature3.points.push_back (f);
+ feature3.push_back (f);
}
}
feature0.width = feature0.size ();
icp.align(Final);
// Check that we have sucessfully converged
- ASSERT_EQ(icp.hasConverged(), true);
+ ASSERT_TRUE(icp.hasConverged());
// Test that the fitness score is below acceptable threshold
EXPECT_LT(icp.getFitnessScore(), 1e-6);
// Check again, for all possible caching schemes
for (int iter = 0; iter < 4; iter++)
{
- bool force_cache = (bool) iter/2;
- bool force_cache_reciprocal = (bool) iter%2;
+ bool force_cache = static_cast<bool> (iter/2);
+ bool force_cache_reciprocal = static_cast<bool> (iter%2);
pcl::search::KdTree<PointT>::Ptr tree(new pcl::search::KdTree<PointT>);
// Ensure that, when force_cache is not set, we are robust to the wrong input
if (force_cache)
// Check again, for all possible caching schemes
for (int iter = 0; iter < 4; iter++)
{
- bool force_cache = (bool) iter/2;
- bool force_cache_reciprocal = (bool) iter%2;
+ bool force_cache = static_cast<bool> (iter/2);
+ bool force_cache_reciprocal = static_cast<bool> (iter%2);
pcl::search::KdTree<PointT>::Ptr tree(new pcl::search::KdTree<PointT>);
// Ensure that, when force_cache is not set, we are robust to the wrong input
if (force_cache)
// Check again, for all possible caching schemes
for (int iter = 0; iter < 4; iter++)
{
- bool force_cache = (bool) iter/2;
- bool force_cache_reciprocal = (bool) iter%2;
+ bool force_cache = static_cast<bool> (iter/2);
+ bool force_cache_reciprocal = static_cast<bool> (iter%2);
pcl::search::KdTree<PointT>::Ptr tree(new pcl::search::KdTree<PointT>);
// Ensure that, when force_cache is not set, we are robust to the wrong input
if (force_cache)
pyramid_target->compute ();
float similarity_value = PyramidFeatureHistogram<PPFSignature>::comparePyramidFeatureHistograms (pyramid_source, pyramid_target);
- EXPECT_NEAR (similarity_value, 0.74101555347442627, 1e-4);
+ EXPECT_NEAR (similarity_value, 0.738492727, 1e-4);
std::vector<std::pair<float, float> > dim_range_target2;
for (std::size_t i = 0; i < 3; ++i) dim_range_target2.emplace_back(static_cast<float> (-M_PI) * 5.0f, static_cast<float> (M_PI) * 5.0f);
pyramid_target->compute ();
float similarity_value2 = PyramidFeatureHistogram<PPFSignature>::comparePyramidFeatureHistograms (pyramid_source, pyramid_target);
- EXPECT_NEAR (similarity_value2, 0.80097091197967529, 1e-4);
+ EXPECT_NEAR (similarity_value2, 0.798465133, 1e-4);
std::vector<std::pair<float, float> > dim_range_target3;
pyramid_target->compute ();
float similarity_value3 = PyramidFeatureHistogram<PPFSignature>::comparePyramidFeatureHistograms (pyramid_source, pyramid_target);
- EXPECT_NEAR (similarity_value3, 0.87623238563537598, 1e-3);
+ EXPECT_NEAR (similarity_value3, 0.873699546, 1e-3);
}
// Suat G: disabled, since the transformation does not look correct.
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/registration/eigen.h>
#include <pcl/registration/correspondence_estimation.h>
#include <pcl/registration/correspondence_rejection_distance.h>
#include <pcl/registration/correspondence_rejection_median_distance.h>
using PointT = pcl::PointXYZ;
for (int iter = 0; iter < 4; iter++)
{
- bool force_cache = (bool) iter/2;
- bool force_cache_reciprocal = (bool) iter%2;
+ bool force_cache = static_cast<bool> (iter/2);
+ bool force_cache_reciprocal = static_cast<bool> (iter%2);
pcl::search::KdTree<PointT>::Ptr tree(new pcl::search::KdTree<PointT>);
// Ensure that, when force_cache is not set, we are robust to the wrong input
if (force_cache)
using PointT = pcl::PointXYZ;
for (int iter = 0; iter < 4; iter++)
{
- bool force_cache = (bool) iter/2;
- bool force_cache_reciprocal = (bool) iter%2;
+ bool force_cache = static_cast<bool> (iter/2);
+ bool force_cache_reciprocal = static_cast<bool> (iter%2);
pcl::search::KdTree<PointT>::Ptr tree(new pcl::search::KdTree<PointT>);
// Ensure that, when force_cache is not set, we are robust to the wrong input
if (force_cache)
*
*/
+#include <Eigen/Geometry> // for Quaternionf
+
#include <pcl/test/gtest.h>
#include <pcl/point_types.h>
const unsigned point_count = 100;
PointCloud<PointXYZ> cloud;
- cloud.points.resize (point_count);
+ cloud.resize (point_count);
for (unsigned idx = 0; idx < point_count; ++idx)
{
cloud[idx].x = static_cast<float> (idx);
SampleConsensusModelSpherePtr model (new SampleConsensusModelSphere<PointXYZ> (cloud.makeShared ()));
TypeParam sac (model, 0.03);
+  // This test sometimes fails for LMedS on Azure, but always passes when run locally.
+  // Enable all output for LMedS so that, when it fails next time, we hopefully see why.
+  // This can be removed again once the failure reason is found and fixed.
+ int debug_verbosity_level = 0;
+ const auto previous_verbosity_level = pcl::console::getVerbosityLevel();
+ if (std::is_same<TypeParam, LeastMedianSquares<PointXYZ>>::value) {
+ debug_verbosity_level = 2;
+ pcl::console::setVerbosityLevel(pcl::console::L_VERBOSE);
+ }
+
// Set up timed conditions
std::condition_variable cv;
std::mutex mtx;
// Create the RANSAC object
std::thread thread ([&] ()
{
- sac.computeModel (0);
+ sac.computeModel (debug_verbosity_level);
// Notify things are done
std::lock_guard<std::mutex> lock (mtx);
// release lock to avoid deadlock
lock.unlock();
thread.join ();
+
+ pcl::console::setVerbosityLevel(previous_verbosity_level); // reset verbosity level
}
int
{
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
- cloud.points.resize (10);
+ cloud.resize (10);
cloud[0].getVector3fMap () << 1.0f, 2.00f, 3.00f;
cloud[1].getVector3fMap () << 4.0f, 5.00f, 6.00f;
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (2, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (8, inliers.size ());
TEST (SampleConsensusModelLine, OnGroundPlane)
{
PointCloud<PointXYZ> cloud;
- cloud.points.resize (10);
+ cloud.resize (10);
// All the points are on the ground plane (z=0).
// The line is parallel to the x axis, so all the inlier points have the same z and y coordinates.
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (6, inliers.size ());
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (2, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (6, inliers.size ());
#include <pcl/pcl_tests.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
+#include <pcl/common/utils.h>
#include <pcl/sample_consensus/msac.h>
#include <pcl/sample_consensus/lmeds.h>
PointCloud<PointXYZ>::Ptr cloud_ (new PointCloud<PointXYZ> ());
PointCloud<Normal>::Ptr normals_ (new PointCloud<Normal> ());
-std::vector<int> indices_;
+pcl::Indices indices_;
float plane_coeffs_[] = {-0.8964f, -0.5868f, -1.208f};
template <typename ModelType, typename SacType>
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (3, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_LT (inlier_number, inliers.size ());
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
PointCloud<Normal> normals;
- cloud.points.resize (10);
+ cloud.resize (10);
normals.resize (10);
for (std::size_t idx = 0; idx < cloud.size (); ++idx)
RandomSampleConsensus<PointXYZ> sac (model, 0.03);
sac.computeModel();
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
ASSERT_EQ (cloud.size (), inliers.size ());
}
RandomSampleConsensus<PointXYZ> sac (model, 0.03);
sac.computeModel ();
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
ASSERT_EQ (cloud.size (), inliers.size ());
}
RandomSampleConsensus<PointXYZ> sac (model, 0.03);
sac.computeModel ();
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
ASSERT_EQ (0, inliers.size ());
}
}
+//////////////////////////////////////////////////////////////////////////////////////////////////
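+// Test-only subclass that re-exposes the internal countWithinDistance implementations (standard, plus SSE/AVX where available) so the test below can compare them directly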
+template <typename PointT>
+class SampleConsensusModelPlaneTest : private SampleConsensusModelPlane<PointT>
+{
+ public:
+ using SampleConsensusModelPlane<PointT>::SampleConsensusModelPlane;
+ using SampleConsensusModelPlane<PointT>::countWithinDistanceStandard;
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ using SampleConsensusModelPlane<PointT>::countWithinDistanceSSE;
+#endif
+#if defined (__AVX__) && defined (__AVX2__)
+ using SampleConsensusModelPlane<PointT>::countWithinDistanceAVX;
+#endif
+};
+
+TEST (SampleConsensusModelPlane, SIMD_countWithinDistance) // Test if all countWithinDistance implementations return the same value
+{
+ const auto seed = static_cast<unsigned> (std::time (nullptr));
+ srand (seed);
+ for (size_t i=0; i<100; i++) // Run as often as you like
+ {
+ // Generate a cloud with 1000 random points
+ PointCloud<PointXYZ> cloud;
+ pcl::Indices indices;
+ cloud.resize (1000);
+ for (std::size_t idx = 0; idx < cloud.size (); ++idx)
+ {
+ cloud[idx].x = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ cloud[idx].y = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ cloud[idx].z = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ if (rand () % 2 == 0)
+ {
+ indices.push_back (static_cast<int> (idx));
+ }
+ }
+ SampleConsensusModelPlaneTest<PointXYZ> model (cloud.makeShared (), indices, true);
+
+ // Generate random model parameters
+ Eigen::VectorXf model_coefficients(4);
+ model_coefficients << 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0, 0.0;
+ model_coefficients.normalize ();
+ model_coefficients(3) = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0; // Last parameter
+
+ const double threshold = 0.1 * static_cast<double> (rand ()) / RAND_MAX; // threshold in [0; 0.1]
+
+ // The number of inliers is usually somewhere between 0 and 100
+ const auto res_standard = model.countWithinDistanceStandard (model_coefficients, threshold); // Standard
+ PCL_DEBUG ("seed=%lu, i=%lu, model=(%f, %f, %f, %f), threshold=%f, res_standard=%lu\n", seed, i,
+ model_coefficients(0), model_coefficients(1), model_coefficients(2), model_coefficients(3), threshold, res_standard);
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ const auto res_sse = model.countWithinDistanceSSE (model_coefficients, threshold); // SSE
+ ASSERT_EQ (res_standard, res_sse);
+#endif
+#if defined (__AVX__) && defined (__AVX2__)
+ const auto res_avx = model.countWithinDistanceAVX (model_coefficients, threshold); // AVX
+ ASSERT_EQ (res_standard, res_avx);
+#endif
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+class SampleConsensusModelNormalPlaneTest : private SampleConsensusModelNormalPlane<PointT, PointNT>
+{
+ public:
+ using SampleConsensusModelNormalPlane<PointT, PointNT>::SampleConsensusModelNormalPlane;
+ using SampleConsensusModelNormalPlane<PointT, PointNT>::setNormalDistanceWeight;
+ using SampleConsensusModelNormalPlane<PointT, PointNT>::setInputNormals;
+ using SampleConsensusModelNormalPlane<PointT, PointNT>::countWithinDistanceStandard;
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ using SampleConsensusModelNormalPlane<PointT, PointNT>::countWithinDistanceSSE;
+#endif
+#if defined (__AVX__) && defined (__AVX2__)
+ using SampleConsensusModelNormalPlane<PointT, PointNT>::countWithinDistanceAVX;
+#endif
+};
+
+TEST (SampleConsensusModelNormalPlane, SIMD_countWithinDistance) // Test if all countWithinDistance implementations return the same value
+{
+ const auto seed = static_cast<unsigned> (std::time (nullptr));
+ srand (seed);
+ for (size_t i=0; i<1000; i++) // Run as often as you like
+ {
+ // Generate a cloud with 10000 random points
+ PointCloud<PointXYZ> cloud;
+ PointCloud<Normal> normal_cloud;
+ pcl::Indices indices;
+ cloud.resize (10000);
+ normal_cloud.resize (10000);
+ for (std::size_t idx = 0; idx < cloud.size (); ++idx)
+ {
+ cloud[idx].x = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ cloud[idx].y = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ cloud[idx].z = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ const double a = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ const double b = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ const double c = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ const double factor = 1.0 / sqrt(a * a + b * b + c * c);
+ normal_cloud[idx].normal[0] = a * factor;
+ normal_cloud[idx].normal[1] = b * factor;
+ normal_cloud[idx].normal[2] = c * factor;
+ if (rand () % 4 != 0)
+ {
+ indices.push_back (static_cast<int> (idx));
+ }
+ }
+ SampleConsensusModelNormalPlaneTest<PointXYZ, Normal> model (cloud.makeShared (), indices, true);
+
+ const double normal_distance_weight = 0.3 * static_cast<double> (rand ()) / RAND_MAX; // in [0; 0.3]
+ model.setNormalDistanceWeight (normal_distance_weight);
+ model.setInputNormals (normal_cloud.makeShared ());
+
+ // Generate random model parameters
+ Eigen::VectorXf model_coefficients(4);
+ model_coefficients << 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0, 0.0;
+ model_coefficients.normalize ();
+ model_coefficients(3) = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0; // Last parameter
+
+ const double threshold = 0.1 * static_cast<double> (rand ()) / RAND_MAX; // threshold in [0; 0.1]
+
+ // The number of inliers is usually somewhere between 0 and 100
+ const auto res_standard = model.countWithinDistanceStandard (model_coefficients, threshold); // Standard
+ pcl::utils::ignore(res_standard);
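+    // unlike the plane test above, the SIMD counts may deviate from the standard count by up to 2 (presumably floating-point rounding in the weighted normal distance)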
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ const auto res_sse = model.countWithinDistanceSSE (model_coefficients, threshold); // SSE
+ EXPECT_LE ((res_standard > res_sse ? res_standard - res_sse : res_sse - res_standard), 2u) << "seed=" << seed << ", i=" << i
+ << ", model=(" << model_coefficients(0) << ", " << model_coefficients(1) << ", " << model_coefficients(2) << ", " << model_coefficients(3)
+ << "), threshold=" << threshold << ", normal_distance_weight=" << normal_distance_weight << ", res_standard=" << res_standard << std::endl;
+#endif
+#if defined (__AVX__) && defined (__AVX2__)
+ const auto res_avx = model.countWithinDistanceAVX (model_coefficients, threshold); // AVX
+ EXPECT_LE ((res_standard > res_avx ? res_standard - res_avx : res_avx - res_standard), 2u) << "seed=" << seed << ", i=" << i
+ << ", model=(" << model_coefficients(0) << ", " << model_coefficients(1) << ", " << model_coefficients(2) << ", " << model_coefficients(3)
+ << "), threshold=" << threshold << ", normal_distance_weight=" << normal_distance_weight << ", res_standard=" << res_standard << std::endl;
+#endif
+ }
+}
int
main (int argc, char** argv)
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
- cloud.points.resize (10);
+ cloud.resize (10);
cloud[0].getVector3fMap () << 1.7068f, 1.0684f, 2.2147f;
cloud[1].getVector3fMap () << 2.4708f, 2.3081f, 1.1736f;
cloud[2].getVector3fMap () << 2.7609f, 1.9095f, 1.3574f;
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (4, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (9, inliers.size ());
EXPECT_NEAR (2, coeff_refined[2] / coeff_refined[3], 1e-2);
}
+//////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT>
+class SampleConsensusModelSphereTest : private SampleConsensusModelSphere<PointT>
+{
+ public:
+ using SampleConsensusModelSphere<PointT>::SampleConsensusModelSphere;
+ using SampleConsensusModelSphere<PointT>::countWithinDistanceStandard;
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ using SampleConsensusModelSphere<PointT>::countWithinDistanceSSE;
+#endif
+#if defined (__AVX__) && defined (__AVX2__)
+ using SampleConsensusModelSphere<PointT>::countWithinDistanceAVX;
+#endif
+};
+
+TEST (SampleConsensusModelSphere, SIMD_countWithinDistance) // Test if all countWithinDistance implementations return the same value
+{
+ const auto seed = static_cast<unsigned> (std::time (nullptr));
+ srand (seed);
+ for (size_t i=0; i<100; i++) // Run as often as you like
+ {
+ // Generate a cloud with 1000 random points
+ PointCloud<PointXYZ> cloud;
+ pcl::Indices indices;
+ cloud.resize (1000);
+ for (std::size_t idx = 0; idx < cloud.size (); ++idx)
+ {
+ cloud[idx].x = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ cloud[idx].y = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ cloud[idx].z = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ if (rand () % 3 != 0)
+ {
+ indices.push_back (static_cast<int> (idx));
+ }
+ }
+ SampleConsensusModelSphereTest<PointXYZ> model (cloud.makeShared (), indices, true);
+
+ // Generate random sphere model parameters
+ Eigen::VectorXf model_coefficients(4);
+ model_coefficients << 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 0.15 * static_cast<float> (rand ()) / RAND_MAX; // center and radius
+
+    const double threshold = 0.15 * static_cast<double> (rand ()) / RAND_MAX; // threshold in [0; 0.15]
+
+ // The number of inliers is usually somewhere between 0 and 10
+ const auto res_standard = model.countWithinDistanceStandard (model_coefficients, threshold); // Standard
+ PCL_DEBUG ("seed=%lu, i=%lu, model=(%f, %f, %f, %f), threshold=%f, res_standard=%lu\n", seed, i,
+ model_coefficients(0), model_coefficients(1), model_coefficients(2), model_coefficients(3), threshold, res_standard);
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ const auto res_sse = model.countWithinDistanceSSE (model_coefficients, threshold); // SSE
+ ASSERT_EQ (res_standard, res_sse);
+#endif
+#if defined (__AVX__) && defined (__AVX2__)
+ const auto res_avx = model.countWithinDistanceAVX (model_coefficients, threshold); // AVX
+ ASSERT_EQ (res_standard, res_avx);
+#endif
+ }
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (SampleConsensusModelNormalSphere, RANSAC)
{
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
PointCloud<Normal> normals;
- cloud.points.resize (27); normals.points.resize (27);
+ cloud.resize (27); normals.resize (27);
cloud[ 0].getVector3fMap () << -0.014695f, 0.009549f, 0.954775f;
cloud[ 1].getVector3fMap () << 0.014695f, 0.009549f, 0.954775f;
cloud[ 2].getVector3fMap () << -0.014695f, 0.040451f, 0.954775f;
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (4, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (27, inliers.size ());
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
PointCloud<Normal> normals;
- cloud.points.resize (31); normals.points.resize (31);
+ cloud.resize (31); normals.resize (31);
cloud[ 0].getVector3fMap () << -0.011247f, 0.200000f, 0.965384f;
cloud[ 1].getVector3fMap () << 0.000000f, 0.200000f, 0.963603f;
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (3, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (31, inliers.size ());
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
PointCloud<Normal> normals;
- cloud.points.resize (20); normals.points.resize (20);
+ cloud.resize (20); normals.resize (20);
cloud[ 0].getVector3fMap () << -0.499902f, 2.199701f, 0.000008f;
cloud[ 1].getVector3fMap () << -0.875397f, 2.030177f, 0.050104f;
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (2, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (20, inliers.size ());
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
- cloud.points.resize (18);
+ cloud.resize (18);
cloud[ 0].getVector3fMap () << 3.587751f, -4.190982f, 0.0f;
cloud[ 1].getVector3fMap () << 3.808883f, -4.412265f, 0.0f;
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (3, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (17, inliers.size ());
EXPECT_NEAR ( 1, coeff_refined[2], 1e-3);
}
+//////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT>
+class SampleConsensusModelCircle2DTest : private SampleConsensusModelCircle2D<PointT>
+{
+ public:
+ using SampleConsensusModelCircle2D<PointT>::SampleConsensusModelCircle2D;
+ using SampleConsensusModelCircle2D<PointT>::countWithinDistanceStandard;
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ using SampleConsensusModelCircle2D<PointT>::countWithinDistanceSSE;
+#endif
+#if defined (__AVX__) && defined (__AVX2__)
+ using SampleConsensusModelCircle2D<PointT>::countWithinDistanceAVX;
+#endif
+};
+
+TEST (SampleConsensusModelCircle2D, SIMD_countWithinDistance) // Test if all countWithinDistance implementations return the same value
+{
+ const auto seed = static_cast<unsigned> (std::time (nullptr));
+ srand (seed);
+ for (size_t i=0; i<100; i++) // Run as often as you like
+ {
+ // Generate a cloud with 1000 random points
+ PointCloud<PointXYZ> cloud;
+ pcl::Indices indices;
+ cloud.resize (1000);
+ for (std::size_t idx = 0; idx < cloud.size (); ++idx)
+ {
+ cloud[idx].x = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ cloud[idx].y = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ cloud[idx].z = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
+ if (rand () % 2 == 0)
+ {
+ indices.push_back (static_cast<int> (idx));
+ }
+ }
+ SampleConsensusModelCircle2DTest<PointXYZ> model (cloud.makeShared (), indices, true);
+
+ // Generate random circle model parameters
+ Eigen::VectorXf model_coefficients(3);
+ model_coefficients << 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
+ 0.1 * static_cast<float> (rand ()) / RAND_MAX; // center and radius
+
+ const double threshold = 0.1 * static_cast<double> (rand ()) / RAND_MAX; // threshold in [0; 0.1]
+
+ // The number of inliers is usually somewhere between 0 and 20
+ const auto res_standard = model.countWithinDistanceStandard (model_coefficients, threshold); // Standard
+ PCL_DEBUG ("seed=%lu, i=%lu, model=(%f, %f, %f), threshold=%f, res_standard=%lu\n", seed, i,
+ model_coefficients(0), model_coefficients(1), model_coefficients(2), threshold, res_standard);
+#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
+ const auto res_sse = model.countWithinDistanceSSE (model_coefficients, threshold); // SSE
+ ASSERT_EQ (res_standard, res_sse);
+#endif
+#if defined (__AVX__) && defined (__AVX2__)
+ const auto res_avx = model.countWithinDistanceAVX (model_coefficients, threshold); // AVX
+ ASSERT_EQ (res_standard, res_avx);
+#endif
+ }
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (SampleConsensusModelCircle3D, RANSAC)
{
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
- cloud.points.resize (20);
+ cloud.resize (20);
cloud[ 0].getVector3fMap () << 1.00000000f, 5.0000000f, -2.9000001f;
cloud[ 1].getVector3fMap () << 1.03420200f, 5.0000000f, -2.9060307f;
bool result = sac.computeModel ();
ASSERT_TRUE (result);
- std::vector<int> sample;
+ pcl::Indices sample;
sac.getModel (sample);
EXPECT_EQ (3, sample.size ());
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
EXPECT_EQ (18, inliers.size ());
#include <iostream>
#include <pcl/test/gtest.h>
+#include <pcl/common/io.h> // for copyPointCloud
#include <pcl/common/distances.h>
#include <pcl/common/time.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/kdtree.h> // for pcl::search::KdTree
#include <pcl/search/impl/flann_search.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
for (float z = -0.5f; z <= 0.5f; z += resolution)
for (float y = -0.5f; y <= 0.5f; y += resolution)
for (float x = -0.5f; x <= 0.5f; x += resolution)
- cloud.points.emplace_back(x, y, z);
+ cloud.emplace_back(x, y, z);
cloud.width = cloud.size ();
cloud.height = 1;
+ srand (int (time (nullptr)));
+  // Randomly create a new point cloud; use points.emplace_back so the width/height set below stay intact
cloud_big.width = 640;
cloud_big.height = 480;
- srand (int (time (nullptr)));
- // Randomly create a new point cloud
for (std::size_t i = 0; i < cloud_big.width * cloud_big.height; ++i)
- cloud_big.points.emplace_back(
- float (1024 * rand () / (RAND_MAX + 1.0)),
- float (1024 * rand () / (RAND_MAX + 1.0)),
- float (1024 * rand () / (RAND_MAX + 1.0)));
+ cloud_big.points.emplace_back(static_cast<float>(1024 * rand() / (RAND_MAX + 1.0)),
+ static_cast<float>(1024 * rand() / (RAND_MAX + 1.0)),
+ static_cast<float>(1024 * rand() / (RAND_MAX + 1.0)));
}
++counter;
}
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
EXPECT_EQ (k_indices.size (), no_of_neighbors);
// Check if all found neighbors have distance smaller than max_dist
- for (const int &k_index : k_indices)
+ for (const auto &k_index : k_indices)
{
const PointXYZ& point = cloud[k_index];
bool ok = euclideanDistance (test_point, point) <= max_dist;
std::vector< std::vector< float > > dists;
- std::vector< std::vector< int > > indices;
- FlannSearch.nearestKSearchT (cloud_rgb, std::vector<int> (),no_of_neighbors,indices,dists);
+ std::vector< pcl::Indices > indices;
+ FlannSearch.nearestKSearchT (cloud_rgb, pcl::Indices (),no_of_neighbors,indices,dists);
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
- //vector<int> k_indices_t;
+ //pcl::Indices k_indices_t;
//k_indices_t.resize (no_of_neighbors);
//vector<float> k_distances_t;
//k_distances_t.resize (no_of_neighbors);
for (std::size_t j = 0; j< no_of_neighbors; j++)
{
EXPECT_TRUE (k_indices[j] == indices[i][j] || k_distances[j] == dists[i][j]);
- //EXPECT_TRUE (k_indices[j] == k_indices_t[j]);
- //EXPECT_TRUE (k_distances[j] == k_distances_t[j]);
+ //EXPECT_EQ (k_indices[j], k_indices_t[j]);
+ //EXPECT_EQ (k_distances[j], k_distances_t[j]);
}
}
FlannSearch.setInputCloud (cloud_big.makeShared ());
std::vector< std::vector< float > > dists;
- std::vector< std::vector< int > > indices;
- FlannSearch.nearestKSearch (cloud_big, std::vector<int>(),no_of_neighbors,indices,dists);
+ std::vector< pcl::Indices > indices;
+ FlannSearch.nearestKSearch (cloud_big, pcl::Indices(),no_of_neighbors,indices,dists);
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
flann_search.setInputCloud (cloud_big.makeShared ());
std::vector< std::vector< float > > dists;
- std::vector< std::vector< int > > indices;
- std::vector< int > query_indices;
+ std::vector< pcl::Indices > indices;
+ pcl::Indices query_indices;
for (std::size_t i = 0; i<cloud_big.size (); i+=2)
{
query_indices.push_back (int (i));
}
flann_search.nearestKSearch (cloud_big, query_indices,no_of_neighbors,indices,dists);
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
{
int no_of_neighbors=3;
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
kdtree_search->nearestKSearch (point, no_of_neighbors, k_indices, k_distances);
}
- std::vector<std::vector<int> > indices_flann;
+ std::vector<pcl::Indices> indices_flann;
std::vector<std::vector<float> > dists_flann;
- std::vector<std::vector<int> > indices_tree;
+ std::vector<pcl::Indices> indices_tree;
std::vector<std::vector<float> > dists_tree;
indices_flann.resize (cloud_big.size ());
dists_flann.resize (cloud_big.size ());
{
ScopeTime scopeTime ("FLANN multi nearestKSearch");
- flann_search->nearestKSearch (cloud_big, std::vector<int> (), no_of_neighbors, indices_flann,dists_flann);
+ flann_search->nearestKSearch (cloud_big, pcl::Indices (), no_of_neighbors, indices_flann,dists_flann);
}
{
ScopeTime scopeTime ("kd tree multi nearestKSearch");
- kdtree_search->nearestKSearch (cloud_big, std::vector<int> (), no_of_neighbors, indices_tree,dists_tree);
+ kdtree_search->nearestKSearch (cloud_big, pcl::Indices (), no_of_neighbors, indices_tree,dists_tree);
}
ASSERT_EQ (indices_flann.size (), dists_flann.size ());
}
- std::vector<int> query_indices;
+ pcl::Indices query_indices;
for (std::size_t i = 0; i<cloud_big.size (); i+=2)
query_indices.push_back (int (i));
*/
#include <iostream>
#include <pcl/test/gtest.h>
+#include <pcl/common/io.h> // for copyPointCloud
#include <pcl/common/time.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
for (float z = -0.5f; z <= 0.5f; z += resolution)
for (float y = -0.5f; y <= 0.5f; y += resolution)
for (float x = -0.5f; x <= 0.5f; x += resolution)
- cloud.points.emplace_back(x, y, z);
+ cloud.emplace_back(x, y, z);
cloud.width = cloud.size ();
cloud.height = 1;
+ srand (static_cast<unsigned int> (time (nullptr)));
cloud_big.width = 640;
cloud_big.height = 480;
- srand (static_cast<unsigned int> (time (nullptr)));
- // Randomly create a new point cloud
+  // Randomly create a new point cloud; use points.emplace_back so the width/height set above stay intact
for (std::size_t i = 0; i < cloud_big.width * cloud_big.height; ++i)
- cloud_big.points.emplace_back(static_cast<float> (1024 * rand () / (RAND_MAX + 1.0)),
- static_cast<float> (1024 * rand () / (RAND_MAX + 1.0)),
- static_cast<float> (1024 * rand () / (RAND_MAX + 1.0)));
+ cloud_big.points.emplace_back(static_cast<float>(1024 * rand() / (RAND_MAX + 1.0)),
+ static_cast<float>(1024 * rand() / (RAND_MAX + 1.0)),
+ static_cast<float>(1024 * rand() / (RAND_MAX + 1.0)));
}
/* Test for KdTree nearestKSearch */TEST (PCL, KdTree_nearestKSearch)
++counter;
}
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
EXPECT_EQ (k_indices.size (), no_of_neighbors);
// Check if all found neighbors have distance smaller than max_dist
- for (const int &k_index : k_indices)
+ for (const auto &k_index : k_indices)
{
const PointXYZ& point = cloud[k_index];
bool ok = euclideanDistance (test_point, point) <= max_dist;
copyPointCloud (cloud_big, cloud_rgb);
std::vector< std::vector< float > > dists;
- std::vector< std::vector< int > > indices;
- kdtree.nearestKSearchT (cloud_rgb, std::vector<int> (),no_of_neighbors,indices,dists);
+ std::vector< pcl::Indices > indices;
+ kdtree.nearestKSearchT (cloud_rgb, pcl::Indices (),no_of_neighbors,indices,dists);
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
- std::vector<int> k_indices_t;
+ pcl::Indices k_indices_t;
k_indices_t.resize (no_of_neighbors);
std::vector<float> k_distances_t;
k_distances_t.resize (no_of_neighbors);
for (std::size_t j=0; j< no_of_neighbors; j++)
{
EXPECT_TRUE (k_indices[j] == indices[i][j] || k_distances[j] == dists[i][j]);
- EXPECT_TRUE (k_indices[j] == k_indices_t[j]);
- EXPECT_TRUE (k_distances[j] == k_distances_t[j]);
+ EXPECT_EQ (k_indices[j], k_indices_t[j]);
+ EXPECT_EQ (k_distances[j], k_distances_t[j]);
}
}
}
kdtree.setInputCloud (cloud_big.makeShared ());
std::vector< std::vector< float > > dists;
- std::vector< std::vector< int > > indices;
- kdtree.nearestKSearch (cloud_big, std::vector<int> (),no_of_neighbors,indices,dists);
+ std::vector< pcl::Indices > indices;
+ kdtree.nearestKSearch (cloud_big, pcl::Indices (),no_of_neighbors,indices,dists);
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
k_indices.resize (no_of_neighbors);
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
return (RUN_ALL_TESTS ());
}
-
#include <pcl/common/time.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/octree.h> // for pcl::search::Octree
using namespace pcl;
using namespace octree;
// instantiate point cloud
PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
- srand (static_cast<unsigned int> (time (nullptr)));
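+  // keep the seed and report it via SCOPED_TRACE so that a failing random configuration can be reproduced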
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
std::priority_queue<prioPointQueueEntry, pcl::PointCloud<prioPointQueueEntry>::VectorType> pointCandidates;
// create octree
pcl::search::Octree<PointXYZ> octree(0.1);
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
std::vector<float> k_sqr_distances;
std::vector<int> k_indices_bruteforce;
// instantiate point cloud
PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
- srand (time (NULL));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
double voxelResolution = 0.1;
}
}
// we should have found the absolute nearest neighbor at least once
- //ASSERT_EQ ( (bestMatchCount > 0) , true);
+ //ASSERT_GT (bestMatchCount, 0);
}
#endif
#if 0
radiuses.push_back(radius);
radiuses.push_back(radius);
radiuses.push_back(radius);
- std::vector<std::vector<int> > k_indices;
+ std::vector<pcl::Indices > k_indices;
std::vector<std::vector<float> > k_distances;
int max_nn = -1;
PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
PointCloud<PointXYZ>::Ptr cloudOut (new PointCloud<PointXYZ> ());
- srand (static_cast<unsigned int> (time (nullptr)));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
for (unsigned int test_id = 0; test_id < test_runs; test_id++)
{
}
}
- std::vector<int> cloudNWRSearch;
+ pcl::Indices cloudNWRSearch;
std::vector<float> cloudNWRRadius;
// execute octree radius search
ASSERT_EQ ( cloudNWRRadius.size() , cloudSearchBruteforce.size());
// check if result from octree radius search can be also found in bruteforce search
- std::vector<int>::const_iterator current = cloudNWRSearch.begin();
- while (current != cloudNWRSearch.end())
+ for (const auto& current : cloudNWRSearch)
{
pointDist = sqrt (
- ((*cloudIn)[*current].x-searchPoint.x) * ((*cloudIn)[*current].x-searchPoint.x) +
- ((*cloudIn)[*current].y-searchPoint.y) * ((*cloudIn)[*current].y-searchPoint.y) +
- ((*cloudIn)[*current].z-searchPoint.z) * ((*cloudIn)[*current].z-searchPoint.z)
+ ((*cloudIn)[current].x-searchPoint.x) * ((*cloudIn)[current].x-searchPoint.x) +
+ ((*cloudIn)[current].y-searchPoint.y) * ((*cloudIn)[current].y-searchPoint.y) +
+ ((*cloudIn)[current].z-searchPoint.z) * ((*cloudIn)[current].z-searchPoint.z)
);
- ASSERT_EQ ( (pointDist<=searchRadius) , true);
-
- ++current;
+ ASSERT_LE (pointDist, searchRadius);
}
// check if result limitation works
octree->radiusSearch(searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius, 5);
- ASSERT_EQ ( cloudNWRRadius.size() <= 5, true);
+ ASSERT_LE (cloudNWRRadius.size(), 5);
}
}
// instantiate point cloud
PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
- srand (int (time (nullptr)));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
// create organized search
search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
std::vector<float> k_sqr_distances;
std::vector<int> k_indices_bruteforce;
{
constexpr unsigned int test_runs = 10;
- srand (int (time (nullptr)));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
}
}
- std::vector<int> cloudNWRSearch;
+ pcl::Indices cloudNWRSearch;
std::vector<float> cloudNWRRadius;
// execute organized search
organizedNeighborSearch.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius);
// check if result from organized radius search can be also found in bruteforce search
- std::vector<int>::const_iterator current = cloudNWRSearch.begin();
- while (current != cloudNWRSearch.end())
+ for (const auto& current : cloudNWRSearch)
{
pointDist = sqrt (
- ((*cloudIn)[*current].x-searchPoint.x) * ((*cloudIn)[*current].x-searchPoint.x) +
- ((*cloudIn)[*current].y-searchPoint.y) * ((*cloudIn)[*current].y-searchPoint.y) +
- ((*cloudIn)[*current].z-searchPoint.z) * ((*cloudIn)[*current].z-searchPoint.z)
+ ((*cloudIn)[current].x-searchPoint.x) * ((*cloudIn)[current].x-searchPoint.x) +
+ ((*cloudIn)[current].y-searchPoint.y) * ((*cloudIn)[current].y-searchPoint.y) +
+ ((*cloudIn)[current].z-searchPoint.z) * ((*cloudIn)[current].z-searchPoint.z)
);
- ASSERT_EQ ( (pointDist<=searchRadius) , true);
-
- ++current;
+ ASSERT_LE (pointDist, searchRadius);
}
// check if bruteforce result from organized radius search can be also found in bruteforce search
- current = cloudSearchBruteforce.begin();
- while (current != cloudSearchBruteforce.end())
+ for (const auto& current : cloudSearchBruteforce)
{
-
pointDist = sqrt (
- ((*cloudIn)[*current].x-searchPoint.x) * ((*cloudIn)[*current].x-searchPoint.x) +
- ((*cloudIn)[*current].y-searchPoint.y) * ((*cloudIn)[*current].y-searchPoint.y) +
- ((*cloudIn)[*current].z-searchPoint.z) * ((*cloudIn)[*current].z-searchPoint.z)
+ ((*cloudIn)[current].x-searchPoint.x) * ((*cloudIn)[current].x-searchPoint.x) +
+ ((*cloudIn)[current].y-searchPoint.y) * ((*cloudIn)[current].y-searchPoint.y) +
+ ((*cloudIn)[current].z-searchPoint.z) * ((*cloudIn)[current].z-searchPoint.z)
);
- ASSERT_EQ ( (pointDist<=searchRadius) , true);
-
- ++current;
+ ASSERT_LE (pointDist, searchRadius);
}
ASSERT_EQ (cloudNWRRadius.size() , cloudSearchBruteforce.size ());
// check if result limitation works
organizedNeighborSearch.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius, 5);
- ASSERT_EQ (cloudNWRRadius.size () <= 5, true);
+ ASSERT_LE (cloudNWRRadius.size (), 5);
}
}
#include <vector>
#include <cstdio>
#include <pcl/common/time.h>
-#include <pcl/common/geometry.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
std::string pcd_filename;
+// Here we want a very precise distance function; speed is less important. So we use
+// double precision, unlike euclideanDistance() in pcl/common/distances and distance()
+// in pcl/common/geometry, which use single-precision float and possibly vectorization
+template <typename PointT> inline double
+point_distance(const PointT& p1, const PointT& p2)
+{
+ const double x_diff = p1.x - p2.x, y_diff = p1.y - p2.y, z_diff = p1.z - p2.z;
+ return std::sqrt(x_diff * x_diff + y_diff * y_diff + z_diff * z_diff);
+}
+
// helper class for priority queue
class prioPointQueueEntry
{
// instantiate point cloud
PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
- srand (time (NULL));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
// create organized search
search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
for (auto it = cloudIn->begin(); it != cloudIn->end(); ++it)
{
- const auto pointDist = geometry::distance(*it, searchPoint);
+ const auto pointDist = point_distance(*it, searchPoint);
prioPointQueueEntry pointEntry (*it, pointDist, std::distance(cloudIn->begin(), it));
pointCandidates.push (pointEntry);
}
// instantiate point cloud
- srand (time (NULL));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
// create organized search
search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
std::vector<float> k_sqr_distances;
std::vector<int> k_indices_bruteforce;
// push all points and their distance to the search point into a priority queue - bruteforce approach.
for (auto it = cloudIn->begin(); it != cloudIn->end(); ++it)
{
- const auto pointDist = geometry::distance(*it, searchPoint);
+ const auto pointDist = point_distance(*it, searchPoint);
prioPointQueueEntry pointEntry (*it, pointDist, std::distance(cloudIn->begin(), it));
pointCandidates.push (pointEntry);
}
{
constexpr unsigned int test_runs = 10;
- srand (time (NULL));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
std::vector<int> k_indices;
for (auto it = cloudIn->points.cbegin(); it != cloudIn->points.cend(); ++it)
{
- const auto pointDist = geometry::distance(*it, searchPoint);
+ const auto pointDist = point_distance(*it, searchPoint);
if (pointDist <= searchRadius)
{
}
}
- std::vector<int> cloudNWRSearch;
+ pcl::Indices cloudNWRSearch;
std::vector<float> cloudNWRRadius;
organizedNeighborSearch.setInputCloud (cloudIn);
// check if result from organized radius search can be also found in bruteforce search
for (const auto it : cloudNWRSearch)
{
- auto const pointDist = geometry::distance((*cloudIn)[it], searchPoint);
- ASSERT_EQ ( (pointDist <= searchRadius) , true);
+ const auto pointDist = point_distance((*cloudIn)[it], searchPoint);
+ ASSERT_LE (pointDist, searchRadius);
}
// check if bruteforce result from organized radius search can be also found in bruteforce search
for (const auto it : cloudSearchBruteforce)
{
- const auto pointDist = geometry::distance((*cloudIn)[it], searchPoint);
- ASSERT_EQ ( (pointDist <= searchRadius) , true);
+ const auto pointDist = point_distance((*cloudIn)[it], searchPoint);
+ ASSERT_LE (pointDist, searchRadius);
}
ASSERT_EQ (cloudNWRRadius.size() , cloudSearchBruteforce.size ());
// check if result limitation works
organizedNeighborSearch.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius, 5);
- ASSERT_EQ (cloudNWRRadius.size () <= 5, true);
+ ASSERT_LE (cloudNWRRadius.size (), 5);
}
}
TEST (PCL, Organized_Neighbor_Search_Pointcloud_Neighbours_Within_Radius_Search_Benchmark_Test)
{
constexpr unsigned int test_runs = 10;
- srand (time (NULL));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
for (auto it = cloudIn->points.cbegin(); it != cloudIn->points.cend(); ++it)
{
- const auto pointDist = geometry::distance(*it, searchPoint);
+ const auto pointDist = point_distance(*it, searchPoint);
if (pointDist <= searchRadius)
{
}
}
- std::vector<int> cloudNWRSearch;
+ pcl::Indices cloudNWRSearch;
std::vector<float> cloudNWRRadius;
double check_time = getTime();
return;
}
constexpr unsigned int test_runs = 10;
- srand (time (NULL));
+ const unsigned int seed = time (nullptr);
+ srand (seed);
+ SCOPED_TRACE("seed=" + std::to_string(seed));
search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
double searchRadius = 1.0 * (test_id*1.0/10*1.0);
double sum_time = 0, sum_time2 = 0;
- std::vector<int> cloudNWRSearch;
+ pcl::Indices cloudNWRSearch;
std::vector<float> cloudNWRRadius;
- std::vector<int> cloudNWRSearch2;
+ pcl::Indices cloudNWRSearch2;
std::vector<float> cloudNWRRadius2;
for (int iter = 0; iter < 100; iter++)
for (auto it = cloudIn->points.cbegin(); it != cloudIn->points.cend(); ++it)
{
- const auto pointDist = geometry::distance(*it, searchPoint);
+ const auto pointDist = point_distance(*it, searchPoint);
if (pointDist <= searchRadius)
{
// check if result from organized radius search can be also found in bruteforce search
for (const auto it : cloudNWRSearch)
{
- double pointDist = geometry::distance((*cloudIn)[it], searchPoint);
- ASSERT_EQ ( (pointDist <= searchRadius) , true);
+ const auto pointDist = point_distance((*cloudIn)[it], searchPoint);
+ ASSERT_LE (pointDist, searchRadius);
}
// check if bruteforce result from organized radius search can be also found in bruteforce search
for (const auto it : cloudSearchBruteforce)
{
- double pointDist = geometry::distance((*cloudIn)[it], searchPoint);
- ASSERT_EQ ( (pointDist <= searchRadius) , true);
+ const auto pointDist = point_distance((*cloudIn)[it], searchPoint);
+ ASSERT_LE (pointDist, searchRadius);
}
ASSERT_EQ (cloudNWRRadius.size() , cloudSearchBruteforce.size ());
// check if result limitation works
organizedNeighborSearch.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius, 5);
- ASSERT_EQ (cloudNWRRadius.size () <= 5, true);
+ ASSERT_LE (cloudNWRRadius.size (), 5);
}
}
std::uniform_real_distribution<float> rand_float (0.0f, 1.0f);
/** \brief used by the *_VIEW_* tests to use only a subset of points from the point cloud*/
-std::vector<int> unorganized_input_indices;
+pcl::Indices unorganized_input_indices;
/** \brief used by the *_VIEW_* tests to use only a subset of points from the point cloud*/
-std::vector<int> organized_input_indices;
+pcl::Indices organized_input_indices;
/** \brief instance of brute force search method to be tested*/
pcl::search::BruteForce<pcl::PointXYZ> brute_force;
std::vector<search::Search<PointXYZ>* > organized_search_methods;
/** \brief lists of indices to be used as query points for various search methods and different cloud types*/
-std::vector<int> unorganized_dense_cloud_query_indices;
-std::vector<int> unorganized_sparse_cloud_query_indices;
-std::vector<int> organized_sparse_query_indices;
+pcl::Indices unorganized_dense_cloud_query_indices;
+pcl::Indices unorganized_sparse_cloud_query_indices;
+pcl::Indices organized_sparse_query_indices;
/** \brief test whether the result of a search contains unique point ids or not
* @param indices resulting indices from a search
 * @param name name of the search method that returned these indices
* @return true if indices are unique, false otherwise
*/
-bool testUniqueness (const std::vector<int>& indices, const std::string& name)
+bool testUniqueness (const pcl::Indices& indices, const std::string& name)
{
bool uniqueness = true;
for (unsigned idx1 = 1; idx1 < indices.size () && uniqueness; ++idx1)
* @return true if result is valid, false otherwise
*/
template<typename PointT> bool
-testResultValidity (const typename PointCloud<PointT>::ConstPtr point_cloud, const std::vector<bool>& indices_mask, const std::vector<bool>& nan_mask, const std::vector<int>& indices, const std::vector<int>& /*input_indices*/, const std::string& name)
+testResultValidity (const typename PointCloud<PointT>::ConstPtr point_cloud, const std::vector<bool>& indices_mask, const std::vector<bool>& nan_mask, const pcl::Indices& indices, const pcl::Indices& /*input_indices*/, const std::string& name)
{
bool validness = true;
- for (const int &index : indices)
+ for (const auto &index : indices)
{
if (!indices_mask [index])
{
* \param eps threshold for comparing the distances
* \return true if both sets are the same, false otherwise
*/
-bool compareResults (const std::vector<int>& indices1, const std::vector<float>& distances1, const std::string& name1,
- const std::vector<int>& indices2, const std::vector<float>& distances2, const std::string& name2, float eps)
+bool compareResults (const pcl::Indices& indices1, const std::vector<float>& distances1, const std::string& name1,
+ const pcl::Indices& indices2, const std::vector<float>& distances2, const std::string& name2, float eps)
{
bool equal = true;
if (indices1.size () != indices2.size ())
*/
template<typename PointT> void
testKNNSearch (typename PointCloud<PointT>::ConstPtr point_cloud, std::vector<search::Search<PointT>*> search_methods,
- const std::vector<int>& query_indices, const std::vector<int>& input_indices = std::vector<int> () )
+ const pcl::Indices& query_indices, const pcl::Indices& input_indices = pcl::Indices () )
{
- std::vector< std::vector<int> >indices (search_methods.size ());
+ std::vector< pcl::Indices >indices (search_methods.size ());
std::vector< std::vector<float> >distances (search_methods.size ());
std::vector<bool> passed (search_methods.size (), true);
if (!input_indices.empty ())
{
indices_mask.assign (point_cloud->size (), false);
- for (const int &input_index : input_indices)
+ for (const auto &input_index : input_indices)
indices_mask [input_index] = true;
}
for (unsigned knn = 1; knn <= 512; knn <<= 3)
{
// find nn for each point in the cloud
- for (const int &query_index : query_indices)
+ for (const auto &query_index : query_indices)
{
#pragma omp parallel for \
shared(indices, input_indices, indices_mask, distances, knn, nan_mask, passed, point_cloud, query_index, search_methods) \
*/
template<typename PointT> void
testRadiusSearch (typename PointCloud<PointT>::ConstPtr point_cloud, std::vector<search::Search<PointT>*> search_methods,
- const std::vector<int>& query_indices, const std::vector<int>& input_indices = std::vector<int> ())
+ const pcl::Indices& query_indices, const pcl::Indices& input_indices = pcl::Indices ())
{
- std::vector< std::vector<int> >indices (search_methods.size ());
+ std::vector< pcl::Indices >indices (search_methods.size ());
std::vector< std::vector<float> >distances (search_methods.size ());
std::vector<bool> passed (search_methods.size (), true);
std::vector<bool> indices_mask (point_cloud->size (), true);
if (!input_indices.empty ())
{
indices_mask.assign (point_cloud->size (), false);
- for (const int &input_index : input_indices)
+ for (const auto &input_index : input_indices)
indices_mask [input_index] = true;
}
{
//std::cout << radius << std::endl;
// find nn for each point in the cloud
- for (const int &query_index : query_indices)
+ for (const auto &query_index : query_indices)
{
#pragma omp parallel for \
default(none) \
// Test search on unorganized point clouds in a grid
TEST (PCL, unorganized_grid_cloud_Complete_Radius)
{
- std::vector<int> query_indices;
+ pcl::Indices query_indices;
query_indices.reserve (query_count);
unsigned skip = static_cast<unsigned> (unorganized_grid_cloud->size ()) / query_count;
* \param cloud input cloud required to check for nans and to get number of points
* \param[in] query_count maximum number of query points
*/
-void createQueryIndices (std::vector<int>& query_indices, PointCloud<PointXYZ>::ConstPtr point_cloud, unsigned query_count)
+void createQueryIndices (pcl::Indices& query_indices, PointCloud<PointXYZ>::ConstPtr point_cloud, unsigned query_count)
{
query_indices.clear ();
query_indices.reserve (query_count);
* \param indices
 * \param max_index highest accepted index usually given by cloud->size () - 1
*/
-void createIndices (std::vector<int>& indices, unsigned max_index)
+void createIndices (pcl::Indices& indices, unsigned max_index)
{
// ~10% of the input cloud
for (unsigned idx = 0; idx <= max_index; ++idx)
{
//construct dataset
pcl::PointCloud<pcl::PointXYZ> cloud_out_ltable;
- cloud_out_ltable.points.resize (100);
+ cloud_out_ltable.resize (100);
int npoints = 0;
for (std::size_t i = 0; i < 8; i++)
}
}
- cloud_out_ltable.points.resize (npoints);
+ cloud_out_ltable.resize (npoints);
pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloudptr (new pcl::PointCloud<pcl::PointXYZ> (cloud_out_ltable));
{
//construct dataset
pcl::PointCloud<pcl::PointXYZ> cloud_out_ltable;
- cloud_out_ltable.points.resize (100);
+ cloud_out_ltable.resize (100);
int npoints = 0;
for (std::size_t i = 0; i < 8; i++)
cloud_out_ltable[npoints].z = 0.f;
npoints++;
- cloud_out_ltable.points.resize (npoints);
+ cloud_out_ltable.resize (npoints);
pcl::PointCloud<pcl::PointXYZ> hull;
std::vector<pcl::Vertices> polygons;
search::KdTree<PointXYZ>::Ptr tree3;
search::KdTree<PointNormal>::Ptr tree4;
+// Test that updatePointCloud works when adding points.
+////////////////////////////////////////////////////////////////////////////////
+TEST(PCL, PCLVisualizer_updatePointCloudAddPoint)
+{
+ pcl::common::CloudGenerator<pcl::PointXYZ, pcl::common::UniformGenerator<float> > generator;
+
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
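+  // generate a small 3 x 1 cloud of uniformly distributed random points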
+ generator.fill(3, 1, *cloud);
+
+ // Setup a basic viewport window
+ pcl::visualization::PCLVisualizer::Ptr viewer(new pcl::visualization::PCLVisualizer("3D Viewer"));
+ viewer->addPointCloud<pcl::PointXYZ>(cloud);
+
+ cloud->push_back(pcl::PointXYZ());
+
+ viewer->updatePointCloud(cloud);
+ viewer->spinOnce(100);
+}
+
// Test that updatePointCloud works when removing points, i.e. modifying the VTK data structure to reflect the modified point cloud
// See #4001 and #3452 for previously undetected error.
////////////////////////////////////////////////////////////////////////////////
-TEST(PCL, PCLVisualizer_updatePointCloud)
+TEST(PCL, PCLVisualizer_updatePointCloudRemovePoint)
{
pcl::common::CloudGenerator<pcl::PointXYZRGB, pcl::common::UniformGenerator<float> > generator;
PCL_ADD_EXECUTABLE(pcl_obj2pcd COMPONENT ${SUBSYS_NAME} SOURCES obj2pcd.cpp)
target_link_libraries(pcl_obj2pcd pcl_common pcl_io)
+ #TODO: Update when CMAKE 3.10 is available
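+  # with VTK >= 9 the required VTK module targets (here FiltersCore) must be linked explicitly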
+ if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries(pcl_obj2pcd VTK::FiltersCore)
+ endif()
PCL_ADD_EXECUTABLE(pcl_obj2ply COMPONENT ${SUBSYS_NAME} SOURCES obj2ply.cpp)
target_link_libraries(pcl_obj2ply pcl_common pcl_io)
PCL_ADD_EXECUTABLE(pcl_vtk2pcd COMPONENT ${SUBSYS_NAME} SOURCES vtk2pcd.cpp)
target_link_libraries(pcl_vtk2pcd pcl_common pcl_io)
+ #TODO: Update when CMAKE 3.10 is available
+ if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ target_link_libraries(pcl_vtk2pcd VTK::FiltersCore)
+ endif()
if(BUILD_visualization)
PCL_ADD_EXECUTABLE(pcl_obj_rec_ransac_model_opps COMPONENT ${SUBSYS_NAME} SOURCES obj_rec_ransac_model_opps.cpp)
#pragma GCC system_header
#endif
-#ifndef Q_MOC_RUN
-// Marking all Boost headers as system headers to remove warnings
#include <boost/date_time/gregorian/gregorian_types.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
-#endif
if (!std::isfinite ((*xyz_source)[point_i].x) || !std::isfinite ((*xyz_source)[point_i].y) || !std::isfinite ((*xyz_source)[point_i].z))
continue;
- std::vector<int> nn_indices (1);
+ pcl::Indices nn_indices (1);
std::vector<float> nn_distances (1);
if (!tree->nearestKSearch ((*xyz_source)[point_i], 1, nn_indices, nn_distances))
continue;
if (!std::isfinite ((*xyz_source)[point_i].x) || !std::isfinite ((*xyz_source)[point_i].y) || !std::isfinite ((*xyz_source)[point_i].z))
continue;
- std::vector<int> nn_indices (1);
+ pcl::Indices nn_indices (1);
std::vector<float> nn_distances (1);
if (!tree->nearestKSearch ((*xyz_source)[point_i], 1, nn_indices, nn_distances))
continue;
float max_dist_a = -std::numeric_limits<float>::max ();
for (const auto &point : cloud_a.points)
{
- std::vector<int> indices (1);
+ pcl::Indices indices (1);
std::vector<float> sqr_distances (1);
tree_b.nearestKSearch (point, 1, indices, sqr_distances);
float max_dist_b = -std::numeric_limits<float>::max ();
for (const auto &point : cloud_b.points)
{
- std::vector<int> indices (1);
+ pcl::Indices indices (1);
std::vector<float> sqr_distances (1);
tree_a.nearestKSearch (point, 1, indices, sqr_distances);
#include <iostream>
#include <pcl/console/time.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/point_types.h>
Eigen::Vector4f translation;
Eigen::Quaternionf orientation;
// TODO:: make this as an optional argument ??
- std::vector<int> tmp_indices;
+ pcl::Indices tmp_indices;
pcl::removeNaNFromPointCloud (*cloud, *cloud, tmp_indices);
// parse optional input arguments from the command line
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
-#include <pcl/common/transforms.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/elch.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
using namespace pcl::io;
compute (cloud, output, sigma_s, sigma_r);
// Prepare output file name
- std::string filename = pcd_files[i];
- boost::trim (filename);
- std::vector<std::string> st;
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
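+  // keep only the file name component, using boost::filesystem instead of splitting the path manually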
+ std::string filename = boost::filesystem::path(pcd_files[i]).filename().string();
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output, translation, rotation);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output, translation, rotation);
}
return (0);
}
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/grid_minimum.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
using namespace pcl::io;
compute (cloud, output, resolution);
// Prepare output file name
- std::string filename = pcd_file;
- boost::trim (filename);
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
+ std::string filename = boost::filesystem::path(pcd_file).filename().string();
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output);
}
return (0);
}
#include <pcl/visualization/image_viewer.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/console/parse.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/mouse_event.h>
#include <boost/algorithm/string.hpp>
#include <pcl/io/image_grabber.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
-#include <pcl/visualization/boost.h>
-#include <pcl/visualization/cloud_viewer.h>
-#include <pcl/visualization/image_viewer.h>
#include <pcl/io/pcd_io.h>
+#include <boost/filesystem.hpp> // for exists
using pcl::console::print_error;
using pcl::console::print_info;
void
cloud_cb (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr & cloud)
{
- std::stringstream ss;
- ss << out_folder << "/" << grabber->getPrevDepthFileName() << ".pcd";
- pcl::io::savePCDFileASCII (ss.str(), *cloud);
+ const std::string filepath = out_folder + '/' + grabber->getPrevDepthFileName() + ".pcd";
+ pcl::io::savePCDFileASCII (filepath, *cloud);
}
};
#include <pcl/io/image_grabber.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/image_viewer.h>
-#include <pcl/io/pcd_io.h>
#include <mutex>
#include <thread>
+#include <boost/filesystem.hpp> // for exists
using namespace std::chrono_literals;
using pcl::console::print_error;
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <pcl/common/time.h> //fps calculations
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/image_viewer.h>
-#include <pcl/io/openni_camera/openni_driver.h>
-#include <pcl/console/parse.h>
#include <vector>
int
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/local_maximum.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
using namespace pcl::io;
compute (cloud, output, radius);
// Prepare output file name
- std::string filename = pcd_file;
- boost::trim (filename);
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
+ std::string filename = boost::filesystem::path(pcd_file).filename().string();
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output);
}
return (0);
}
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
-#include <pcl/common/transforms.h>
#include <pcl/registration/lum.h>
#include <pcl/registration/correspondence_estimation.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/console/print.h>
-#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/recognition/linemod.h>
*/
#include <pcl/visualization/pcl_visualizer.h>
+#include <pcl/visualization/vtk/pcl_vtk_compatibility.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/vtk_lib_io.h>
-#include <pcl/common/transforms.h>
#include <vtkVersion.h>
#include <vtkPLYReader.h>
#include <vtkOBJReader.h>
double A[3], B[3], C[3];
vtkIdType npts = 0;
- vtkIdType *ptIds = nullptr;
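+  // vtkCellPtsPtr abstracts the cell point-id pointer type, which became const in VTK 9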
+ vtkCellPtsPtr ptIds = nullptr;
+
polydata->GetCellPoints (el, npts, ptIds);
polydata->GetPoint (ptIds[0], A);
polydata->GetPoint (ptIds[1], B);
{
static bool printed_once = false;
if (!printed_once)
- PCL_WARN ("Mesh has no vertex colors, or vertex colors are not RGB!");
+ PCL_WARN ("Mesh has no vertex colors, or vertex colors are not RGB!\n");
printed_once = true;
}
}
double p1[3], p2[3], p3[3], totalArea = 0;
std::vector<double> cumulativeAreas (cells->GetNumberOfCells (), 0);
- vtkIdType npts = 0, *ptIds = nullptr;
+ vtkIdType npts = 0;
+ vtkCellPtsPtr ptIds = nullptr;
std::size_t cellId = 0;
for (cells->InitTraversal (); cells->GetNextCell (npts, ptIds); cellId++)
{
cumulativeAreas[cellId] = totalArea;
}
- cloud_out.points.resize (n_samples);
+ cloud_out.resize (n_samples);
cloud_out.width = static_cast<std::uint32_t> (n_samples);
cloud_out.height = 1;
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/surface/mls.h>
-#include <pcl/filters/voxel_grid.h>
#include <pcl/search/kdtree.h> // for KdTree
using namespace pcl;
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/morphological_filter.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
using namespace pcl::io;
compute (cloud, output, resolution, method);
// Prepare output file name
- std::string filename = pcd_file;
- boost::trim (filename);
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
-
+ std::string filename = boost::filesystem::path(pcd_file).filename().string();
+
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output);
}
return (0);
}
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/registration/ndt_2d.h>
-#include <pcl/registration/transformation_estimation_lm.h>
-#include <pcl/registration/warp_point_rigid_3d.h>
#include <string>
#include <iostream>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
using namespace pcl::io;
compute (cloud, output, k, radius);
// Prepare output file name
- std::string filename = pcd_files[i];
- boost::trim (filename);
- std::vector<std::string> st;
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
+ std::string filename = boost::filesystem::path(pcd_files[i]).filename().string();
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output, translation, rotation);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output, translation, rotation);
}
return (0);
}
#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/print.h>
-#include <pcl/console/parse.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <vtkVersion.h>
#include <vtkPolyDataReader.h>
#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/print.h>
-#include <pcl/console/parse.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <vtkPolyDataReader.h>
#include <vtkDoubleArray.h>
#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/print.h>
-#include <pcl/console/parse.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <vtkVersion.h>
#include <vtkPolyDataReader.h>
#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/print.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <vtkVersion.h>
#include <vtkPolyDataReader.h>
// Make sure that the ids are sorted
sort (inliers->indices.begin (), inliers->indices.end ());
- std::size_t j = 0;
- for ( std::size_t i = 0, id = 0 ; i < inliers->indices.size () ; )
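+  // walk over all point ids: ids found in the (sorted) inlier list go to plane_points, the rest to the non_plane_* arrays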
+ pcl::uindex_t j = 0, i = 0;
+ for ( pcl::index_t id = 0 ; i < inliers->indices.size () ; ++id)
{
- if ( static_cast<int> (id) == inliers->indices[i] )
+ if ( id == inliers->indices[i] )
{
plane_points[i] = (*all_points)[id];
++i;
non_plane_normals[j] = (*all_normals)[id];
++j;
}
- ++id;
}
// Just copy the rest of the non-plane points
#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/print.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <vtkVersion.h>
#include <vtkPolyDataReader.h>
#include <pcl/io/auto_io.h>
#include <pcl/common/time.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/visualization/point_cloud_handlers.h>
#include <pcl/visualization/common/common.h>
#include <pcl/octree/octree_pointcloud_voxelcentroid.h>
}
//remove NaN Points
- std::vector<int> nanIndexes;
+ pcl::Indices nanIndexes;
pcl::removeNaNFromPointCloud(*cloud, *cloud, nanIndexes);
std::cout << "Loaded " << cloud->size() << " points" << std::endl;
viz.addText (dataDisplay, 0, 45, 1.0, 0.0, 0.0, "disp_original_points");
char level[256];
- sprintf (level, "Displayed depth is %d on %d", displayedDepth, octree.getTreeDepth());
+ sprintf (level, "Displayed depth is %d on %zu", displayedDepth, static_cast<std::size_t>(octree.getTreeDepth()));
viz.removeShape ("level_t1");
viz.addText (level, 0, 30, 1.0, 0.0, 0.0, "level_t1");
#include <pcl/common/time.h> //fps calculations
#include <pcl/console/parse.h>
#include <pcl/io/oni_grabber.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/common/time_trigger.h>
#include <mutex>
-#include <vector>
#define SHOW_FPS 1
#if SHOW_FPS
#define MEASURE_FUNCTION_TIME
#include <pcl/common/time.h> //fps calculations
-#include <pcl/common/angles.h>
#include <pcl/io/openni2_grabber.h>
#include <pcl/io/openni2/openni.h>
#include <pcl/io/openni2/openni2_device_manager.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/image_viewer.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/common/time.h> //fps calculations
#include <pcl/io/openni_grabber.h>
#include <pcl/io/lzf_image_io.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/common/float_image_utils.h>
#include <pcl/visualization/image_viewer.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/visualization/mouse_event.h>
#include <boost/circular_buffer.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp> // for ptime, to_iso_string, ...
#include <csignal>
#include <limits>
FPS_CALC_WRITER ("data write ", buf_);
nr_frames_total++;
- std::stringstream ss1, ss2, ss3;
-
std::string time_string = boost::posix_time::to_iso_string (frame->time);
// Save RGB data
- ss1 << "frame_" << time_string << "_rgb.pclzf";
+ const std::string rgb_filename = "frame_" + time_string + "_rgb.pclzf";
switch (frame->image->getEncoding ())
{
case openni_wrapper::Image::YUV422:
{
io::LZFYUV422ImageWriter lrgb;
- lrgb.write (reinterpret_cast<const char*> (&frame->image->getMetaData ().Data ()[0]), frame->image->getWidth (), frame->image->getHeight (), ss1.str ());
+ lrgb.write (reinterpret_cast<const char*> (&frame->image->getMetaData ().Data ()[0]), frame->image->getWidth (), frame->image->getHeight (), rgb_filename);
break;
}
case openni_wrapper::Image::RGB:
{
io::LZFRGB24ImageWriter lrgb;
- lrgb.write (reinterpret_cast<const char*> (&frame->image->getMetaData ().Data ()[0]), frame->image->getWidth (), frame->image->getHeight (), ss1.str ());
+ lrgb.write (reinterpret_cast<const char*> (&frame->image->getMetaData ().Data ()[0]), frame->image->getWidth (), frame->image->getHeight (), rgb_filename);
break;
}
case openni_wrapper::Image::BAYER_GRBG:
{
io::LZFBayer8ImageWriter lrgb;
- lrgb.write (reinterpret_cast<const char*> (&frame->image->getMetaData ().Data ()[0]), frame->image->getWidth (), frame->image->getHeight (), ss1.str ());
+ lrgb.write (reinterpret_cast<const char*> (&frame->image->getMetaData ().Data ()[0]), frame->image->getWidth (), frame->image->getHeight (), rgb_filename);
break;
}
}
// Save depth data
- ss2 << "frame_" + time_string + "_depth.pclzf";
+ const std::string depth_filename = "frame_" + time_string + "_depth.pclzf";
io::LZFDepth16ImageWriter ld;
//io::LZFShift11ImageWriter ld;
- ld.write (reinterpret_cast<const char*> (&frame->depth_image->getDepthMetaData ().Data ()[0]), frame->depth_image->getWidth (), frame->depth_image->getHeight (), ss2.str ());
+ ld.write (reinterpret_cast<const char*> (&frame->depth_image->getDepthMetaData ().Data ()[0]), frame->depth_image->getWidth (), frame->depth_image->getHeight (), depth_filename);
// Save depth data
- ss3 << "frame_" << time_string << ".xml";
+ const std::string xml_filename = "frame_" + time_string + ".xml";
io::LZFRGB24ImageWriter lrgb;
- lrgb.writeParameters (frame->parameters_rgb, ss3.str ());
- ld.writeParameters (frame->parameters_depth, ss3.str ());
+ lrgb.writeParameters (frame->parameters_rgb, xml_filename);
+ ld.writeParameters (frame->parameters_depth, xml_filename);
// By default, the z-value depth multiplication factor is written as part of the LZFDepthImageWriter's writeParameters as 0.001
// If you want to change that, uncomment the next line and change the value
- //ld.writeParameter (0.001, "depth.z_multiplication_factor", ss3.str ());
+ //ld.writeParameter (0.001, "depth.z_multiplication_factor", xml_filename);
#include <pcl/io/openni_grabber.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/console/parse.h>
-#include <pcl/visualization/vtk.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include "boost.h"
+#include <vtkSmartPointer.h>
+#include <vtkImageImport.h>
+#include <vtkTIFFWriter.h>
+#include <vtkImageFlip.h>
#include <mutex>
#include <string>
#include <vector>
+#include <boost/date_time/posix_time/posix_time.hpp> // for to_iso_string, local_time
#define SHOW_FPS 1
data = reinterpret_cast<const void*> (rgb_data);
}
- std::stringstream ss;
- ss << "frame_" + time + "_rgb.tiff";
+ const std::string filename = "frame_" + time + "_rgb.tiff";
importer_->SetImportVoidPointer (const_cast<void*>(data), 1);
importer_->Update ();
flipper_->SetInputConnection (importer_->GetOutputPort ());
flipper_->Update ();
- writer_->SetFileName (ss.str ().c_str ());
+ writer_->SetFileName (filename.c_str ());
writer_->SetInputConnection (flipper_->GetOutputPort ());
writer_->Write ();
}
openni_wrapper::DepthImage::Ptr depth_image;
depth_image.swap (depth_image_);
- std::stringstream ss;
- ss << "frame_" + time + "_depth.tiff";
+ const std::string filename = "frame_" + time + "_depth.tiff";
depth_importer_->SetWholeExtent (0, depth_image->getWidth () - 1, 0, depth_image->getHeight () - 1, 0, 0);
depth_importer_->SetDataExtentToWholeExtent ();
depth_importer_->Update ();
flipper_->SetInputConnection (depth_importer_->GetOutputPort ());
flipper_->Update ();
- writer_->SetFileName (ss.str ().c_str ());
+ writer_->SetFileName (filename.c_str ());
writer_->SetInputConnection (flipper_->GetOutputPort ());
writer_->Write ();
}
#include <pcl/common/time.h> //fps calculations
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/image_viewer.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/mouse_event.h>
#include <vtkImageViewer.h>
pcl::PointIndices::Ptr removed_indices (new PointIndices),
indices (new PointIndices);
- std::vector<int> valid_indices;
+ pcl::Indices valid_indices;
if (keep_organized)
{
xyz_cloud = xyz_cloud_pre;
else
{
// Make sure we are addressing values in the original index vector
- for (std::size_t i = 0; i < removed_indices->indices.size (); ++i)
- indices->indices.push_back (valid_indices[removed_indices->indices[i]]);
+ for (const auto& i : (removed_indices->indices))
+ indices->indices.push_back (valid_indices[i]);
// Extract the indices of the remaining points
pcl::ExtractIndices<pcl::PCLPointCloud2> ei;
*/
#include <pcl/PCLPointCloud2.h>
-#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/passthrough.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
using namespace pcl::io;
compute (cloud, output, field_name, min, max, inside, keep_organized);
// Prepare output file name
- std::string filename = pcd_file;
- boost::trim (filename);
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
+ std::string filename = boost::filesystem::path(pcd_file).filename().string();
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output);
}
return (0);
}
#include <pcl/io/pcd_io.h>
#include <pcl/io/png_io.h>
#include <pcl/conversions.h>
+#include <boost/lexical_cast.hpp> // for lexical_cast
using namespace pcl;
using namespace pcl::io;
#include <pcl/io/pcd_grabber.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/image_viewer.h>
-
+#include <boost/filesystem.hpp> // for exists, extension, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
#include <mutex>
#include <thread>
#include <pcl/common/common.h>
#include <pcl/io/pcd_io.h>
#include <cfloat>
-#include <pcl/visualization/eigen.h>
-//#include <pcl/visualization/vtk.h>
-#include <pcl/visualization/point_cloud_handlers.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/image_viewer.h>
#include <pcl/visualization/histogram_visualizer.h>
search.setInputCloud (xyzcloud);
}
// Return the correct index in the cloud instead of the index on the screen
- std::vector<int> indices (1);
+ pcl::Indices indices (1);
std::vector<float> distances (1);
// Because VTK/OpenGL stores data without NaN, we lose the 1-1 correspondence, so we must search for the real point
}
// Else, if a single point has been selected
- std::stringstream ss;
- ss << idx;
+ const std::string idx_string = std::to_string(idx);
// Get the cloud's fields
for (std::size_t i = 0; i < cloud->fields.size (); ++i)
{
if (!isMultiDimensionalFeatureField (cloud->fields[i]))
continue;
PCL_INFO ("Multidimensional field found: %s\n", cloud->fields[i].name.c_str ());
- ph_global.addFeatureHistogram (*cloud, cloud->fields[i].name, idx, ss.str ());
+ ph_global.addFeatureHistogram (*cloud, cloud->fields[i].name, idx, idx_string);
ph_global.renderOnce ();
}
if (p)
{
pcl::PointXYZ pos;
event.getPoint (pos.x, pos.y, pos.z);
- p->addText3D<pcl::PointXYZ> (ss.str (), pos, 0.0005, 1.0, 1.0, 1.0, ss.str ());
+ p->addText3D<pcl::PointXYZ> (idx_string, pos, 0.0005, 1.0, 1.0, 1.0, idx_string);
}
}
}
// Add as actor
- std::stringstream cloud_name ("vtk-");
- cloud_name << argv[vtk_file_indices.at (i)] << "-" << i;
- p->addModelFromPolyData (polydata, cloud_name.str (), viewport);
+ const std::string cloud_name = "vtk-" + std::string(argv[vtk_file_indices.at (i)]) + "-" + std::to_string(i);
+ p->addModelFromPolyData (polydata, cloud_name, viewport);
// Change the shape rendered color
if (fcolorparam && fcolor_r.size () > i && fcolor_g.size () > i && fcolor_b.size () > i)
- p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, fcolor_r[i], fcolor_g[i], fcolor_b[i], cloud_name.str ());
+ p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, fcolor_r[i], fcolor_g[i], fcolor_b[i], cloud_name);
// Change the shape rendered point size
if (!psize.empty ())
- p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, psize.at (i), cloud_name.str ());
+ p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, psize.at (i), cloud_name);
// Change the shape rendered opacity
if (!opaque.empty ())
- p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_OPACITY, opaque.at (i), cloud_name.str ());
+ p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_OPACITY, opaque.at (i), cloud_name);
// Change the shape rendered shading
if (!shadings.empty ())
if (shadings[i] == "flat")
{
print_highlight (stderr, "Setting shading property for %s to FLAT.\n", argv[vtk_file_indices.at (i)]);
- p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_SHADING, pcl::visualization::PCL_VISUALIZER_SHADING_FLAT, cloud_name.str ());
+ p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_SHADING, pcl::visualization::PCL_VISUALIZER_SHADING_FLAT, cloud_name);
}
else if (shadings[i] == "gouraud")
{
print_highlight (stderr, "Setting shading property for %s to GOURAUD.\n", argv[vtk_file_indices.at (i)]);
- p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_SHADING, pcl::visualization::PCL_VISUALIZER_SHADING_GOURAUD, cloud_name.str ());
+ p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_SHADING, pcl::visualization::PCL_VISUALIZER_SHADING_GOURAUD, cloud_name);
}
else if (shadings[i] == "phong")
{
print_highlight (stderr, "Setting shading property for %s to PHONG.\n", argv[vtk_file_indices.at (i)]);
- p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_SHADING, pcl::visualization::PCL_VISUALIZER_SHADING_PHONG, cloud_name.str ());
+ p->setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_SHADING, pcl::visualization::PCL_VISUALIZER_SHADING_PHONG, cloud_name);
}
}
}
origin.block<3, 1> (0, 0) = (pose * Eigen::Translation3f (origin.block<3, 1> (0, 0))).translation ();
}
- std::stringstream cloud_name;
+ std::string cloud_name;
// ---[ Special check for 1-point multi-dimension histograms
if (cloud->fields.size () == 1 && isMultiDimensionalFeatureField (cloud->fields[0]))
{
- cloud_name << argv[p_file_indices.at (i)];
+ cloud_name = argv[p_file_indices.at (i)];
if (!ph)
ph.reset (new pcl::visualization::PCLPlotter);
pcl::getMinMax (*cloud, 0, cloud->fields[0].name, min_p, max_p);
- ph->addFeatureHistogram (*cloud, cloud->fields[0].name, cloud_name.str ());
+ ph->addFeatureHistogram (*cloud, cloud->fields[0].name, cloud_name);
print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", cloud->fields[0].count); print_info (" points]\n");
continue;
}
print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%u", cloud->width * cloud->height); print_info (" points]\n");
print_info ("Available dimensions: "); print_value ("%s\n", pcl::getFieldsList (*cloud).c_str ());
- std::stringstream name;
- name << "PCD Viewer :: " << argv[p_file_indices.at (i)];
- pcl::visualization::ImageViewer::Ptr img (new pcl::visualization::ImageViewer (name.str ()));
+ std::string name = "PCD Viewer :: " + std::string(argv[p_file_indices.at (i)]);
+ pcl::visualization::ImageViewer::Ptr img (new pcl::visualization::ImageViewer (name));
pcl::PointCloud<pcl::RGB> rgb_cloud;
pcl::fromPCLPointCloud2 (*cloud, rgb_cloud);
continue;
}
- cloud_name << argv[p_file_indices.at (i)] << "-" << i;
+ cloud_name += std::string(argv[p_file_indices.at (i)]) + "-" + std::to_string(i);
// Create the PCLVisualizer object here on the first encountered XYZ file
if (!p)
// Add the dataset with a XYZ and a random handler
geometry_handler.reset (new pcl::visualization::PointCloudGeometryHandlerXYZ<pcl::PCLPointCloud2> (cloud));
// Add the cloud to the renderer
- //p->addPointCloud<pcl::PointXYZ> (cloud_xyz, geometry_handler, color_handler, cloud_name.str (), viewport);
- p->addPointCloud (cloud, geometry_handler, color_handler, origin, orientation, cloud_name.str (), viewport);
+ //p->addPointCloud<pcl::PointXYZ> (cloud_xyz, geometry_handler, color_handler, cloud_name, viewport);
+ p->addPointCloud (cloud, geometry_handler, color_handler, origin, orientation, cloud_name, viewport);
if (mview)
pcl::PointCloud<pcl::Normal>::Ptr cloud_normals (new pcl::PointCloud<pcl::Normal>);
pcl::fromPCLPointCloud2 (*cloud, *cloud_normals);
- std::stringstream cloud_name_normals;
- cloud_name_normals << argv[p_file_indices.at (i)] << "-" << i << "-normals";
- p->addPointCloudNormals<pcl::PointXYZ, pcl::Normal> (cloud_xyz, cloud_normals, normals, normals_scale, cloud_name_normals.str (), viewport);
+ const std::string cloud_name_normals = std::string(argv[p_file_indices.at (i)]) + "-" + std::to_string(i) + "-normals";
+ p->addPointCloudNormals<pcl::PointXYZ, pcl::Normal> (cloud_xyz, cloud_normals, normals, normals_scale, cloud_name_normals, viewport);
}
// If principal curvature lines are enabled
pcl::fromPCLPointCloud2 (*cloud, *cloud_normals);
pcl::PointCloud<pcl::PrincipalCurvatures>::Ptr cloud_pc (new pcl::PointCloud<pcl::PrincipalCurvatures>);
pcl::fromPCLPointCloud2 (*cloud, *cloud_pc);
- std::stringstream cloud_name_normals_pc;
- cloud_name_normals_pc << argv[p_file_indices.at (i)] << "-" << i << "-normals";
+ std::string cloud_name_normals_pc = std::string(argv[p_file_indices.at (i)]) + "-" + std::to_string(i) + "-normals";
int factor = (std::min)(normals, pc);
- p->addPointCloudNormals<pcl::PointXYZ, pcl::Normal> (cloud_xyz, cloud_normals, factor, normals_scale, cloud_name_normals_pc.str (), viewport);
- p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, 1.0, 0.0, 0.0, cloud_name_normals_pc.str ());
- p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 3, cloud_name_normals_pc.str ());
- cloud_name_normals_pc << "-pc";
- p->addPointCloudPrincipalCurvatures<pcl::PointXYZ, pcl::Normal> (cloud_xyz, cloud_normals, cloud_pc, factor, pc_scale, cloud_name_normals_pc.str (), viewport);
- p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 3, cloud_name_normals_pc.str ());
+ p->addPointCloudNormals<pcl::PointXYZ, pcl::Normal> (cloud_xyz, cloud_normals, factor, normals_scale, cloud_name_normals_pc, viewport);
+ p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, 1.0, 0.0, 0.0, cloud_name_normals_pc);
+ p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 3, cloud_name_normals_pc);
+ cloud_name_normals_pc += "-pc";
+ p->addPointCloudPrincipalCurvatures<pcl::PointXYZ, pcl::Normal> (cloud_xyz, cloud_normals, cloud_pc, factor, pc_scale, cloud_name_normals_pc, viewport);
+ p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 3, cloud_name_normals_pc);
}
// Add every dimension as a possible color
color_handler.reset (new pcl::visualization::PointCloudColorHandlerGenericField<pcl::PCLPointCloud2> (cloud, cloud->fields[f].name));
}
// Add the cloud to the renderer
- //p->addPointCloud<pcl::PointXYZ> (cloud_xyz, color_handler, cloud_name.str (), viewport);
- p->addPointCloud (cloud, color_handler, origin, orientation, cloud_name.str (), viewport);
+ //p->addPointCloud<pcl::PointXYZ> (cloud_xyz, color_handler, cloud_name, viewport);
+ p->addPointCloud (cloud, color_handler, origin, orientation, cloud_name, viewport);
}
// Set RGB color handler or label handler as default
- p->updateColorHandlerIndex (cloud_name.str (), (rgb_idx ? rgb_idx : label_idx));
+ p->updateColorHandlerIndex (cloud_name, (rgb_idx ? rgb_idx : label_idx));
}
// Additionally, add normals as a handler
geometry_handler.reset (new pcl::visualization::PointCloudGeometryHandlerSurfaceNormal<pcl::PCLPointCloud2> (cloud));
if (geometry_handler->isCapable ())
- //p->addPointCloud<pcl::PointXYZ> (cloud_xyz, geometry_handler, cloud_name.str (), viewport);
- p->addPointCloud (cloud, geometry_handler, origin, orientation, cloud_name.str (), viewport);
+ //p->addPointCloud<pcl::PointXYZ> (cloud_xyz, geometry_handler, cloud_name, viewport);
+ p->addPointCloud (cloud, geometry_handler, origin, orientation, cloud_name, viewport);
if (use_immediate_rendering)
// Set immediate mode rendering ON
- p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_IMMEDIATE_RENDERING, 1.0, cloud_name.str ());
+ p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_IMMEDIATE_RENDERING, 1.0, cloud_name);
// Change the cloud rendered point size
if (!psize.empty ())
- p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, psize.at (i), cloud_name.str ());
+ p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, psize.at (i), cloud_name);
// Change the cloud rendered opacity
if (!opaque.empty ())
- p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_OPACITY, opaque.at (i), cloud_name.str ());
+ p->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_OPACITY, opaque.at (i), cloud_name);
// Reset camera viewpoint to center of cloud if camera parameters were not passed manually and this is the first loaded cloud
if (i == 0 && !p->cameraParamsSet () && !p->cameraFileLoaded ())
{
- p->resetCameraViewpoint (cloud_name.str ());
+ p->resetCameraViewpoint (cloud_name);
p->resetCamera ();
}
{
std::string str;
if (!p_file_indices.empty ())
- str = std::string (argv[p_file_indices.at (0)]);
+ str = argv[p_file_indices.at (0)];
else if (!vtk_file_indices.empty ())
- str = std::string (argv[vtk_file_indices.at (0)]);
+ str = argv[vtk_file_indices.at (0)];
for (std::size_t i = 1; i < p_file_indices.size (); ++i)
str += ", " + std::string (argv[p_file_indices.at (i)]);
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/console/parse.h>
#include <pcl/common/time.h>
-#include "boost.h"
+#include <boost/date_time/gregorian/gregorian_types.hpp> // for date
+#include <boost/date_time/posix_time/posix_time.hpp> // for local_time
+#include <boost/date_time/posix_time/posix_time_types.hpp> // for time_duration
using namespace std::chrono_literals;
namespace bpt = boost::posix_time;
#include <pcl/console/time.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
-#include <pcl/io/vtk_lib_io.h>
+#include <vtkImageData.h> // for vtkImageData
+#include <vtkSmartPointer.h> // for vtkSmartPointer
+#include <vtkPNGReader.h> // for vtkPNGReader
#define RED_MULTIPLIER 0.299
#define GREEN_MULTIPLIER 0.587
mono_cloud.width = dimensions[0];
mono_cloud.height = dimensions[1]; // This indicates that the point cloud is organized
mono_cloud.is_dense = true;
- mono_cloud.points.resize (mono_cloud.width * mono_cloud.height);
+ mono_cloud.resize (mono_cloud.width * mono_cloud.height);
for (int y = 0; y < dimensions[1]; y++)
{
mono_cloud_u8.width = dimensions[0];
mono_cloud_u8.height = dimensions[1]; // This indicates that the point cloud is organized
mono_cloud_u8.is_dense = true;
- mono_cloud_u8.points.resize (mono_cloud_u8.width * mono_cloud_u8.height);
+ mono_cloud_u8.resize (mono_cloud_u8.width * mono_cloud_u8.height);
for (int y = 0; y < dimensions[1]; y++)
{
color_cloud.width = dimensions[0];
color_cloud.height = dimensions[1]; // This indicates that the point cloud is organized
color_cloud.is_dense = true;
- color_cloud.points.resize (color_cloud.width * color_cloud.height);
+ color_cloud.resize (color_cloud.width * color_cloud.height);
for (int y = 0; y < dimensions[1]; y++)
{
color_cloud.width = dimensions[0];
color_cloud.height = dimensions[1]; // This indicates that the point cloud is organized
color_cloud.is_dense = true;
- color_cloud.points.resize (color_cloud.width * color_cloud.height);
+ color_cloud.resize (color_cloud.width * color_cloud.height);
for (int y = 0; y < dimensions[1]; y++)
{
cloud.width = dimensions[0];
cloud.height = dimensions[1]; // This indicates that the point cloud is organized
cloud.is_dense = false;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (int y = 0; y < dimensions[1]; y++)
{
cloud.width = dimensions[0];
cloud.height = dimensions[1]; // This indicates that the point cloud is organized
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (int y = 0; y < dimensions[1]; y++)
{
cloud.width = dimensions[0];
cloud.height = dimensions[1]; // This indicates that the point cloud is organized
cloud.is_dense = false;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (int y = 0; y < dimensions[1]; y++)
{
cloud.width = dimensions[0];
cloud.height = dimensions[1]; // This indicates that the point cloud is organized
cloud.is_dense = true;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (int y = 0; y < dimensions[1]; y++)
{
cloud8u.width = dimensions[0];
cloud8u.height = dimensions[1]; // This indicates that the point cloud is organized
cloud8u.is_dense = true;
- cloud8u.points.resize (cloud8u.width * cloud8u.height);
+ cloud8u.resize (cloud8u.width * cloud8u.height);
for (int y = 0; y < dimensions[1]; y++)
{
#include <pcl/filters/extract_indices.h>
#include <pcl/segmentation/approximate_progressive_morphological_filter.h>
#include <pcl/segmentation/progressive_morphological_filter.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
using namespace pcl::io;
print_highlight (stderr, "Computing ");
- std::vector<int> ground;
+ pcl::Indices ground;
if (approximate)
{
compute (cloud, output, max_window_size, slope, max_distance, initial_distance, cell_size, base, exponential, approximate);
// Prepare output file name
- std::string filename = pcd_file;
- boost::trim (filename);
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
-
+ std::string filename = boost::filesystem::path(pcd_file).filename().string();
+
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output);
}
return (0);
}
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/conditional_removal.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using PointType = pcl::PointXYZ;
compute (cloud, output, radius, inside, keep_organized);
// Prepare output file name
- std::string filename = pcd_file;
- boost::trim (filename);
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
-
+ std::string filename = boost::filesystem::path(pcd_file).filename().string();
+
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output);
}
return (0);
}
#include <pcl/io/pcd_io.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/registration/icp.h>
-#include <pcl/registration/icp_nl.h>
#include <pcl/visualization/registration_visualizer.h>
int
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
using namespace pcl::io;
sac.setMaxIterations (max_iterations);
bool res = sac.computeModel ();
- std::vector<int> inliers;
+ pcl::Indices inliers;
sac.getInliers (inliers);
Eigen::VectorXf coefficients;
sac.getModelCoefficients (coefficients);
compute (cloud, output, max_it, thresh, negative);
// Prepare output file name
- std::string filename = pcd_file;
- boost::trim (filename);
- boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
+ std::string filename = boost::filesystem::path(pcd_file).filename().string();
// Save into the second file
- std::stringstream ss;
- ss << output_dir << "/" << st.at (st.size () - 1);
- saveCloud (ss.str (), output);
+ const std::string filepath = output_dir + '/' + filename;
+ saveCloud (filepath, output);
}
return (0);
}
#include <iostream>
#include <boost/filesystem.hpp>
-#include <pcl/pcl_base.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/io/vtk_lib_io.h>
+#include <vtkImageData.h> // for vtkImageData
#include <vtkSmartPointer.h>
#include <vtkImageViewer2.h>
#include <vtkTIFFReader.h>
color_point.b = xyzrgba_point.b = static_cast<std::uint8_t> (rgb_data->GetScalarComponentAsFloat(u,v,0,2));
xyzrgba_point.a = 0;
- pc_image.points.push_back(color_point);
- pc_depth.points.push_back(depth_point);
+ pc_image.push_back(color_point);
+ pc_depth.push_back(depth_point);
float d = depth_data->GetScalarComponentAsFloat(u,v,0,0);
depth_point.intensity = d;
{
xyzrgba_point.z = xyzrgba_point.x = xyzrgba_point.y = bad_point;
}
- pc_xyzrgba.points.push_back(xyzrgba_point);
+ pc_xyzrgba.push_back(xyzrgba_point);
} // for u
} // for v
- std::stringstream ss;
-
if(depth)
{
+ std::string depth_filename = "frame_" + time + "_depth.pcd";
if(use_output_path)
- ss << output_path << "/frame_" << time << "_depth.pcd";
- else
- ss << "frame_" << time << "_depth.pcd";
- pcl::io::savePCDFile (ss.str(), pc_depth, format);
- ss.str(""); //empty
+ depth_filename = output_path + '/' + depth_filename;
+ pcl::io::savePCDFile (depth_filename, pc_depth, format);
}
if(color)
{
+ std::string color_filename = "frame_" + time + "_color.pcd";
if(use_output_path)
- ss << output_path << "/frame_" << time << "_color.pcd";
- else
- ss << "frame_" << time << "_color.pcd";
- pcl::io::savePCDFile (ss.str(), pc_image, format);
- ss.str(""); //empty
+ color_filename = output_path + '/' + color_filename;
+ pcl::io::savePCDFile (color_filename, pc_image, format);
}
-
+ std::string xyzrgba_filename = "frame_" + time + "_xyzrgba.pcd";
if(use_output_path)
- ss << output_path << "/frame_" << time << "_xyzrgba.pcd";
- else
- ss << "frame_" << time << "_xyzrgba.pcd";
- pcl::io::savePCDFile (ss.str(), pc_xyzrgba, format);
+ xyzrgba_filename = output_path + '/' + xyzrgba_filename;
+ pcl::io::savePCDFile (xyzrgba_filename, pc_xyzrgba, format);
std::cout << "Saved " << time << " to pcd" << std::endl;
return;
#include <thread>
#include <pcl/common/time_trigger.h>
#include <pcl/common/time.h>
-#include <pcl/visualization/boost.h>
using namespace std::chrono_literals;
using namespace pcl;
std::vector<bool> foreground_mask (input->size (), false);
// Mask off points outside the specified near and far depth thresholds
- pcl::IndicesPtr indices (new std::vector<int>);
+ pcl::IndicesPtr indices (new pcl::Indices);
for (std::size_t i = 0; i < input->size (); ++i)
{
const float z = (*input)[i].z;
seg.segment (*inliers, *coefficients);
// Mask off the plane inliers
- for (const int &index : inliers->indices)
+ for (const auto &index : inliers->indices)
foreground_mask[index] = false;
// Mask off any foreground points that are too high above the detected plane
#include <pcl/PCLPointCloud2.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/features/fpfh.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
int z_idx = pcl::getFieldIndex (cloud, "z");
Eigen::Array3i xyz_offset (cloud.fields[x_idx].offset, cloud.fields[y_idx].offset, cloud.fields[z_idx].offset);
- for (std::uint32_t cp = 0; cp < cloud.width * cloud.height; ++cp)
+ for (uindex_t cp = 0; cp < cloud.width * cloud.height; ++cp)
{
// Assume all 3 fields are the same (XYZ)
assert ((cloud.fields[x_idx].datatype == cloud.fields[y_idx].datatype));
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
+#include <pcl/filters/filter.h> // for removeNaNFromPointCloud
#include <pcl/segmentation/unary_classifier.h>
+#include <boost/filesystem.hpp> // for path, exists, ...
using namespace pcl;
using namespace pcl::io;
if (!boost::filesystem::is_directory (it->status ()) &&
boost::filesystem::extension (it->path ()) == ".pcd")
{
- std::stringstream ss;
- ss << it->path ().string ();
+ const std::string path = it->path ().string ();
- print_highlight ("Loading %s \n", ss.str ().c_str ());
+ print_highlight ("Loading %s \n", path.c_str ());
FeatureT::Ptr features (new FeatureT);
- if (loadPCDFile (ss.str (), *features) < 0)
+ if (loadPCDFile (path, *features) < 0)
return false;
out.push_back (features);
return (-1);
// TODO:: make this as an optional argument ??
- std::vector<int> tmp_indices;
+ pcl::Indices tmp_indices;
pcl::removeNaNFromPointCloud (*cloud, *cloud, tmp_indices);
// parse optional input arguments from the command line
#include <boost/filesystem.hpp>
#include <algorithm>
#include <string>
-#include <pcl/io/vtk_io.h>
using namespace pcl;
using namespace pcl::io;
us.getRemovedIndices(removed_indices);
std::sort(removed_indices.indices.begin(), removed_indices.indices.end());
// Compute retained indices as a set difference between all and removed
- std::vector<int> retained;
+ pcl::Indices retained;
std::set_difference(input_indices->begin(),
input_indices->end(),
removed_indices.indices.begin(),
*
*/
-#include <pcl/PCLPointCloud2.h>
#include <pcl/io/pcd_io.h>
#include <pcl/features/vfh.h>
#include <pcl/console/print.h>
#include <pcl/memory.h> // for pcl::make_shared
#include <pcl/point_types.h>
#include <pcl/console/parse.h>
-#include <pcl/visualization/vtk.h>
+
+#include <vtkGeneralTransform.h>
+#include <vtkPlatonicSolidSource.h>
+#include <vtkLoopSubdivisionFilter.h>
+#include <vtkCellLocator.h>
+#include <vtkMath.h>
#include <boost/algorithm/string.hpp> // for boost::is_any_of, boost::split, boost::token_compress_on, boost::trim
#include <boost/filesystem.hpp> // for boost::filesystem::create_directories, boost::filesystem::exists, boost::filesystem::extension, boost::filesystem::path
" -view_point <x,y,z> : set the camera viewpoint from where the acquisition will take place\n"
" -target_point <x,y,z> : the target point that the camera should look at (default: 0, 0, 0)\n"
" -organized <0|1> : create an organized, grid-like point cloud of width x height (1), or keep it unorganized with height = 1 (0)\n"
+            " -scale <double>  : scaling factor applied to the point XYZ coordinates (default: 1 for meters, use 1000 for millimeters)\n"
" -noise <0|1> : add gaussian noise (1) or keep the model noiseless (0)\n"
" -noise_std <x> : use X times the standard deviation\n"
"");
return (-1);
}
- std::string filename;
+
// Parse the command line arguments for .vtk or .ply files
std::vector<int> p_file_indices_vtk = console::parse_file_extension_argument (argc, argv, ".vtk");
std::vector<int> p_file_indices_ply = console::parse_file_extension_argument (argc, argv, ".ply");
console::parse_3x_arguments (argc, argv, "-target_point", tx, ty, tz);
int organized = 0;
console::parse_argument (argc, argv, "-organized", organized);
+ double scale = 1;
+ console::parse_argument (argc, argv, "-scale", scale);
+
if (organized)
PCL_INFO ("Saving an organized dataset.\n");
else
return (-1);
}
- std::stringstream filename_stream;
+ std::string filename;
if (!p_file_indices_ply.empty ())
- filename_stream << argv[p_file_indices_ply.at (0)];
+ filename = argv[p_file_indices_ply.at (0)];
else
- filename_stream << argv[p_file_indices_vtk.at (0)];
-
- filename = filename_stream.str ();
+ filename = argv[p_file_indices_vtk.at (0)];
data = loadDataSet (filename.c_str ());
int sid = -1;
for (int i = 0; i < number_of_points; i++)
{
+ // Clear cloud for next view scan
+ cloud.clear();
+
sphere->GetPoint (i, eye);
if (std::abs(eye[0]) < EPS) eye[0] = 0;
if (std::abs(eye[1]) < EPS) eye[1] = 0;
pcl::PointWithViewpoint pt;
if (object_coordinates)
{
- pt.x = static_cast<float> (x[0]);
- pt.y = static_cast<float> (x[1]);
- pt.z = static_cast<float> (x[2]);
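+      // Apply the optional -scale factor to the generated coordinates (default 1 keeps meters; e.g. 1000 converts to millimeters)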
+ pt.x = static_cast<float> (x[0] * scale);
+ pt.y = static_cast<float> (x[1] * scale);
+ pt.z = static_cast<float> (x[2] * scale);
pt.vp_x = static_cast<float> (eye[0]);
pt.vp_y = static_cast<float> (eye[1]);
pt.vp_z = static_cast<float> (eye[2]);
// z axis is the viewray
// y axis is up
// x axis is -right (negative because z*y=-x but viewray*up=right)
- pt.x = static_cast<float> (-right[0]*x[1] + up[0]*x[2] + viewray[0]*x[0] + eye[0]);
- pt.y = static_cast<float> (-right[1]*x[1] + up[1]*x[2] + viewray[1]*x[0] + eye[1]);
- pt.z = static_cast<float> (-right[2]*x[1] + up[2]*x[2] + viewray[2]*x[0] + eye[2]);
+      pt.x = static_cast<float> ((-right[0]*x[1] + up[0]*x[2] + viewray[0]*x[0] + eye[0]) * scale);
+ pt.y = static_cast<float> ((-right[1]*x[1] + up[1]*x[2] + viewray[1]*x[0] + eye[1]) * scale);
+ pt.z = static_cast<float> ((-right[2]*x[1] + up[2]*x[2] + viewray[2]*x[0] + eye[2]) * scale);
pt.vp_x = pt.vp_y = pt.vp_z = 0.0f;
}
- cloud.points.push_back (pt);
+ cloud.push_back (pt);
}
else
if (organized)
pt.vp_x = static_cast<float> (eye[0]);
pt.vp_y = static_cast<float> (eye[1]);
pt.vp_z = static_cast<float> (eye[2]);
- cloud.points.push_back (pt);
+ cloud.push_back (pt);
}
} // Horizontal
} // Vertical
boost::trim (filename);
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
- std::stringstream ss;
- std::string output_dir = st.at (st.size () - 1);
- ss << output_dir << "_output";
+ const std::string output_dir = st.at (st.size () - 1) + "_output";
- boost::filesystem::path outpath (ss.str ());
+ boost::filesystem::path outpath (output_dir);
if (!boost::filesystem::exists (outpath))
{
if (!boost::filesystem::create_directories (outpath))
{
- PCL_ERROR ("Error creating directory %s.\n", ss.str ().c_str ());
+ PCL_ERROR ("Error creating directory %s.\n", output_dir.c_str ());
return (-1);
}
- PCL_INFO ("Creating directory %s\n", ss.str ().c_str ());
+ PCL_INFO ("Creating directory %s\n", output_dir.c_str ());
}
- fname = ss.str () + "/" + seq + ".pcd";
+ fname = output_dir + '/' + seq + ".pcd";
if (organized)
{
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/common/time.h> //fps calculations
-#include <pcl/io/pcd_io.h>
#include <pcl/io/hdl_grabber.h>
#include <pcl/io/vlp_grabber.h>
#include <pcl/visualization/point_cloud_color_handlers.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/image_viewer.h>
-#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/console/parse.h>
-#include <pcl/visualization/boost.h>
#include <boost/algorithm/string.hpp>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-#include <pcl/common/transforms.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/filters/voxel_grid_occlusion_estimation.h>
#include <vtkCubeSource.h>
#include <pcl/io/pcd_io.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
+#include <boost/algorithm/string/split.hpp> // for split
using namespace pcl;
using namespace pcl::io;
#pragma once
#include <pcl/search/octree.h>
-#include <pcl/search/search.h>
#include <pcl/tracking/nearest_pair_point_cloud_coherence.h>
namespace pcl {
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#include <pcl/pcl_macros.h>
-
-PCL_DEPRECATED_HEADER(1, 12, "")
-
-#include <boost/random.hpp>
double val = 0.0;
// for (std::size_t i = 0; i < indices->size (); i++)
for (const auto& point : *cloud) {
- int k_index = 0;
+ pcl::index_t k_index = 0;
float k_distance = 0.0;
// PointInT input_point = cloud->points[(*indices)[i]];
PointInT input_point = point;
#include <pcl/tracking/distance_coherence.h>
-#include <Eigen/Dense>
-
namespace pcl {
namespace tracking {
template <typename PointInT>
#include <pcl/tracking/hsv_color_coherence.h>
-#include <Eigen/Dense>
-
namespace pcl {
namespace tracking {
// utility
else {
std::vector<IndicesPtr> indices_list(particle_num_);
for (int i = 0; i < particle_num_; i++) {
- indices_list[i] = IndicesPtr(new std::vector<int>);
+ indices_list[i] = IndicesPtr(new pcl::Indices);
}
// clang-format off
#pragma omp parallel for \
#define PCL_TRACKING_IMPL_NEAREST_PAIR_POINT_CLOUD_COHERENCE_H_
#include <pcl/search/kdtree.h>
-#include <pcl/search/organized.h>
#include <pcl/tracking/nearest_pair_point_cloud_coherence.h>
namespace pcl {
// for (std::size_t i = 0; i < indices->size (); i++)
for (std::size_t i = 0; i < cloud->size(); i++) {
PointInT input_point = (*cloud)[i];
- std::vector<int> k_indices(1);
+ pcl::Indices k_indices(1);
std::vector<float> k_distances(1);
search_->nearestKSearch(input_point, 1, k_indices, k_distances);
int k_index = k_indices[0];
{
change_detector_->setInputCloud(input);
change_detector_->addPointsFromInputCloud();
- std::vector<int> newPointIdxVector;
+ pcl::Indices newPointIdxVector;
change_detector_->getPointIndicesFromNewVoxels(newPointIdxVector,
change_detector_filter_);
change_detector_->switchBuffers();
}
else {
for (std::size_t i = 0; i < particles_->size(); i++) {
- IndicesPtr indices(new std::vector<int>);
+ IndicesPtr indices(new pcl::Indices);
computeTransformedPointCloudWithNormal(
(*particles_)[i], *indices, *transed_reference_vector_[i]);
}
coherence_->setTargetCloud(coherence_input);
coherence_->initCompute();
for (std::size_t i = 0; i < particles_->size(); i++) {
- IndicesPtr indices(new std::vector<int>);
+ IndicesPtr indices(new pcl::Indices);
coherence_->compute(
transed_reference_vector_[i], indices, (*particles_)[i].weight);
}
template <typename PointInT, typename StateT>
void
ParticleFilterTracker<PointInT, StateT>::computeTransformedPointCloud(
- const StateT& hypothesis, std::vector<int>& indices, PointCloudIn& cloud)
+ const StateT& hypothesis, pcl::Indices& indices, PointCloudIn& cloud)
{
if (use_normal_)
computeTransformedPointCloudWithNormal(hypothesis, indices, cloud);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename StateT>
+template <typename PointT, pcl::traits::HasNormal<PointT>>
void
ParticleFilterTracker<PointInT, StateT>::computeTransformedPointCloudWithNormal(
-#ifdef PCL_TRACKING_NORMAL_SUPPORTED
- const StateT& hypothesis, std::vector<int>& indices, PointCloudIn& cloud)
-#else
- const StateT&, std::vector<int>&, PointCloudIn&)
-#endif
+ const StateT& hypothesis, pcl::Indices& indices, PointCloudIn& cloud)
{
-#ifdef PCL_TRACKING_NORMAL_SUPPORTED
const Eigen::Affine3f trans = toEigenMatrix(hypothesis);
// destructively assigns to cloud
pcl::transformPointCloudWithNormals<PointInT>(*ref_, cloud, trans);
if (theta > occlusion_angle_thr_)
indices.push_back(i);
}
-#else
- PCL_WARN("[pcl::%s::computeTransformedPointCloudWithoutNormal] "
- "use_normal_ == true is not supported in this Point Type.",
- getClassName().c_str());
-#endif
}
template <typename PointInT, typename StateT>
else {
std::vector<IndicesPtr> indices_list(particle_num_);
for (int i = 0; i < particle_num_; i++) {
- indices_list[i] = IndicesPtr(new std::vector<int>);
+ indices_list[i] = IndicesPtr(new pcl::Indices);
}
// clang-format off
#pragma omp parallel for \
keypoints_ = p;
}
- keypoints_status_.reset(new pcl::PointIndices);
- keypoints_status_->indices.resize(keypoints_->size(), 0);
+ keypoints_status_.reset(new std::vector<int>);
+ keypoints_status_->resize(keypoints_->size(), 0);
}
///////////////////////////////////////////////////////////////////////////////////////////////
if (!input_->isOrganized()) {
PCL_ERROR(
- "[pcl::tracking::%s::initCompute] Need an organized point cloud to proceed!",
+ "[pcl::tracking::%s::initCompute] Need an organized point cloud to proceed!\n",
tracker_name_.c_str());
return (false);
}
if (!keypoints_ || keypoints_->empty()) {
- PCL_ERROR("[pcl::tracking::%s::initCompute] No keypoints aborting!",
+    PCL_ERROR("[pcl::tracking::%s::initCompute] No keypoints, aborting!\n",
tracker_name_.c_str());
return (false);
}
if (nb_levels_ < 2) {
PCL_ERROR("[pcl::tracking::%s::initCompute] Number of pyramid levels should be "
- "at least 2!",
+ "at least 2!\n",
tracker_name_.c_str());
return (false);
}
if (nb_levels_ > 5) {
PCL_ERROR("[pcl::tracking::%s::initCompute] Number of pyramid levels should not "
- "exceed 5!",
+ "exceed 5!\n",
tracker_name_.c_str());
return (false);
}
ref_ = input_;
ref_pyramid_ = pyramid;
keypoints_ = keypoints;
- keypoints_status_->indices = status;
+ *keypoints_status_ = status;
}
} // namespace tracking
#ifndef PCL_TRACKING_IMPL_TRACKER_H_
#define PCL_TRACKING_IMPL_TRACKER_H_
-#include <pcl/common/eigen.h>
#include <pcl/tracking/tracker.h>
-#include <ctime>
-
namespace pcl {
namespace tracking {
template <typename PointInT, typename StateT>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <ctime>
-
namespace pcl {
namespace tracking {
struct _ParticleXYZRPY {
#include <pcl/tracking/tracker.h>
#include <pcl/tracking/tracking.h>
#include <pcl/memory.h>
-
-#include <Eigen/Dense>
+#include <pcl/point_types.h>
namespace pcl {
namespace tracking {
inline void
setUseNormal(bool use_normal)
{
- use_normal_ = use_normal;
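+    // Only accept use_normal = true when the point type actually provides a normal field; otherwise warn and keep it disabled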
+ if (traits::has_normal_v<PointInT> || !use_normal) {
+ use_normal_ = use_normal;
+ return;
+ }
+ PCL_WARN("[pcl::%s::setUseNormal] "
+ "use_normal_ == true is not supported in this Point Type.\n",
+ getClassName().c_str());
+ use_normal_ = false;
}
/** \brief Get the value of use_normal_. */
**/
void
computeTransformedPointCloud(const StateT& hypothesis,
- std::vector<int>& indices,
+ pcl::Indices& indices,
PointCloudIn& cloud);
+#ifdef DOXYGEN_ONLY
/** \brief Compute a reference pointcloud transformed to the pose that hypothesis
* represents and calculate indices taking occlusion into account.
* \param[in] hypothesis a particle which represents a hypothesis.
**/
void
computeTransformedPointCloudWithNormal(const StateT& hypothesis,
- std::vector<int>& indices,
+ pcl::Indices& indices,
+ PointCloudIn& cloud);
+#else
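+  // Compile-time dispatch: the overload below participates only for point types that
+  // provide a normal field (see the .hpp implementation); the stub that follows is
+  // selected otherwise and merely warns at run time.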
+ template <typename PointT = PointInT, traits::HasNormal<PointT> = true>
+ void
+ computeTransformedPointCloudWithNormal(const StateT& hypothesis,
+ pcl::Indices& indices,
PointCloudIn& cloud);
+ template <typename PointT = PointInT, traits::HasNoNormal<PointT> = true>
+ void
+ computeTransformedPointCloudWithNormal(const StateT&, pcl::Indices&, PointCloudIn&)
+ {
+ PCL_WARN("[pcl::%s::computeTransformedPointCloudWithNormal] "
+ "use_normal_ == true is not supported in this Point Type.\n",
+ getClassName().c_str());
+ }
+#endif
/** \brief Compute a reference pointcloud transformed to the pose that hypothesis
* represents and calculate indices without taking occlusion into account.
   * Status == -1 --> point is out of bounds;
   * Status == -2 --> optical flow cannot be computed for this point.
*/
+ PCL_DEPRECATED(1, 15, "use getStatusOfPointsToTrack instead")
inline pcl::PointIndicesConstPtr
getPointsToTrackStatus() const
+ {
+ pcl::PointIndicesPtr res(new pcl::PointIndices);
+ res->indices.insert(
+ res->indices.end(), keypoints_status_->begin(), keypoints_status_->end());
+ return (res);
+ }
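+  // Note: the deprecated accessor above repackages the plain int status vector as
+  // pcl::PointIndices for callers written against the older interface.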
+
+ /** \return the status of points to track.
+ * Status == 0 --> points successfully tracked;
+ * Status < 0 --> point is lost;
+   * Status == -1 --> point is out of bounds;
+   * Status == -2 --> optical flow cannot be computed for this point.
+ */
+ inline pcl::shared_ptr<const std::vector<int>>
+ getStatusOfPointsToTrack() const
{
return (keypoints_status_);
}
/** \brief detected keypoints 2D coordinates */
pcl::PointCloud<pcl::PointUV>::ConstPtr keypoints_;
/** \brief status of keypoints of t-1 at t */
- pcl::PointIndicesPtr keypoints_status_;
+ pcl::shared_ptr<std::vector<int>> keypoints_status_;
/** \brief number of points to detect */
std::size_t keypoints_nbr_;
/** \brief tracking width */
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-#define PCL_TRACKING_NORMAL_SUPPORTED
// clang-format off
PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterTracker,
((pcl::PointNormal)
(pcl::PointXYZINormal)
- (pcl::PointXYZRGBNormal))
- (PCL_STATE_POINT_TYPES))
-PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterOMPTracker,
- ((pcl::PointNormal)
- (pcl::PointXYZINormal)
- (pcl::PointXYZRGBNormal))
- (PCL_STATE_POINT_TYPES))
-// clang-format on
-#undef PCL_TRACKING_NORMAL_SUPPORTED
-// clang-format off
-PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterOMPTracker,
- ((pcl::PointXYZ)
+ (pcl::PointXYZRGBNormal)
+ (pcl::PointXYZ)
(pcl::PointXYZI)
(pcl::PointXYZRGBA)
(pcl::PointXYZRGB)
(pcl::PointWithViewpoint)
(pcl::PointWithScale))
(PCL_STATE_POINT_TYPES))
-PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterTracker,
- ((pcl::PointXYZ)
+
+PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterOMPTracker,
+ ((pcl::PointNormal)
+ (pcl::PointXYZINormal)
+ (pcl::PointXYZRGBNormal)
+ (pcl::PointXYZ)
(pcl::PointXYZI)
(pcl::PointXYZRGBA)
(pcl::PointXYZRGB)
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-#define PCL_TRACKING_NORMAL_SUPPORTED
// clang-format off
PCL_INSTANTIATE_PRODUCT(ParticleFilterTracker,
((pcl::PointNormal)
(pcl::PointXYZINormal)
- (pcl::PointXYZRGBNormal))
- (PCL_STATE_POINT_TYPES))
-PCL_INSTANTIATE_PRODUCT(ParticleFilterOMPTracker,
- ((pcl::PointNormal)
- (pcl::PointXYZINormal)
- (pcl::PointXYZRGBNormal))
- (PCL_STATE_POINT_TYPES))
-// clang-format on
-#undef PCL_TRACKING_NORMAL_SUPPORTED
-// clang-format off
-PCL_INSTANTIATE_PRODUCT(ParticleFilterTracker,
- ((pcl::PointXYZ)
+ (pcl::PointXYZRGBNormal)
+ (pcl::PointXYZ)
(pcl::PointXYZI)
(pcl::PointXYZRGBA)
(pcl::PointXYZRGB)
(pcl::PointWithViewpoint)
(pcl::PointWithScale))
(PCL_STATE_POINT_TYPES))
+
PCL_INSTANTIATE_PRODUCT(ParticleFilterOMPTracker,
- ((pcl::PointXYZ)
+ ((pcl::PointNormal)
+ (pcl::PointXYZINormal)
+ (pcl::PointXYZRGBNormal)
+ (pcl::PointXYZ)
(pcl::PointXYZI)
(pcl::PointXYZRGBA)
(pcl::PointXYZRGB)
"src/vtk/vtkVertexBufferObject.cxx"
"src/vtk/vtkVertexBufferObjectMapper.cxx"
)
+
+  # Code in this module may use deprecated declarations when building against OpenGLv1
+  # Add the GCC-only option so that -Werror does not abort the OpenGLv1 build on deprecation warnings
+ if(CMAKE_COMPILER_IS_GNUCXX)
+ add_compile_options("-Wno-error=deprecated-declarations")
+ endif()
+endif()
+
+if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ if(NOT (";${VTK_AVAILABLE_COMPONENTS};" MATCHES ";RenderingContextOpenGL2;"))
+ list(REMOVE_ITEM srcs
+ src/pcl_painter2D.cpp
+ )
+ endif()
endif()
set(incs
"include/pcl/${SUBSYS_NAME}/vtk.h"
"include/pcl/${SUBSYS_NAME}/simple_buffer_visualizer.h"
"include/pcl/${SUBSYS_NAME}/pcl_plotter.h"
+ "include/pcl/${SUBSYS_NAME}/qvtk_compatibility.h"
)
set(common_incs
"include/pcl/${SUBSYS_NAME}/vtk/pcl_image_canvas_source_2d.h"
"include/pcl/${SUBSYS_NAME}/vtk/pcl_context_item.h"
"include/pcl/${SUBSYS_NAME}/vtk/vtkRenderWindowInteractorFix.h"
+ "include/pcl/${SUBSYS_NAME}/vtk/pcl_vtk_compatibility.h"
)
if(VTK_RENDERING_BACKEND_OPENGL_VERSION VERSION_LESS 2)
)
endif()
+if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ if(NOT (";${VTK_AVAILABLE_COMPONENTS};" MATCHES ";RenderingContextOpenGL2;"))
+
+ list(REMOVE_ITEM incs
+ "include/pcl/${SUBSYS_NAME}/pcl_painter2D.h"
+ )
+ endif()
+endif()
+
# on apple, a workaround is used for the cocoa render window interactor
if(APPLE)
list(APPEND srcs
set(LIB_NAME "pcl_${SUBSYS_NAME}")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${common_incs} ${impl_incs} ${common_impl_incs} ${vtk_incs})
-target_include_directories("${LIB_NAME}" SYSTEM PUBLIC ${VTK_INCLUDE_DIRS})
-
# apple workaround (continued)
if(APPLE)
target_link_libraries("${LIB_NAME}" "-framework Cocoa")
endif()
-target_link_libraries("${LIB_NAME}" pcl_common pcl_io pcl_kdtree ${VTK_LIBRARIES} ${OPENGL_LIBRARIES})
+target_link_libraries("${LIB_NAME}" pcl_common pcl_io pcl_kdtree ${OPENGL_LIBRARIES})
+
+if(${VTK_VERSION} VERSION_LESS 9.0)
+ target_include_directories("${LIB_NAME}" SYSTEM PUBLIC ${VTK_INCLUDE_DIRS})
+ target_link_libraries("${LIB_NAME}" ${VTK_LIBRARIES})
+else()
+  # Some libraries are referenced transitively through the IO dependency
+ target_link_libraries("${LIB_NAME}"
+ VTK::ChartsCore
+ VTK::CommonColor
+ VTK::CommonComputationalGeometry
+ VTK::CommonDataModel
+ VTK::FiltersExtraction
+ VTK::FiltersGeometry
+ VTK::FiltersGeneral
+ VTK::FiltersModeling
+ VTK::FiltersSources
+ VTK::IOImage
+ VTK::IOPLY
+ VTK::ImagingSources
+ VTK::InteractionImage
+ VTK::InteractionStyle
+ VTK::RenderingAnnotation
+ VTK::RenderingContext2D
+ VTK::RenderingFreeType
+ VTK::RenderingLOD
+ VTK::RenderingOpenGL2
+ VTK::ViewsContext2D)
+
+ if(";${VTK_AVAILABLE_COMPONENTS};" MATCHES ";RenderingContextOpenGL2;")
+ target_link_libraries("${LIB_NAME}" VTK::RenderingContextOpenGL2)
+ endif()
+endif()
set(EXT_DEPS "")
if(WITH_OPENNI)
if(WITH_RSSDK)
list(APPEND EXT_DEPS rssdk)
endif()
+
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} EXT_DEPS ${EXT_DEPS})
# Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/common/impl" ${common_impl_incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/vtk" ${vtk_incs})
+# TODO: Update when CMake 3.10 is available
+if(NOT (${VTK_VERSION} VERSION_LESS 9.0))
+ vtk_module_autoinit(TARGETS "${LIB_NAME}"
+ MODULES VTK::RenderingOpenGL2
+ VTK::RenderingFreeType)
+endif()
+
if(BUILD_TESTS)
add_subdirectory(test)
endif()
#include <pcl/pcl_macros.h>
-#include <vector>
-
namespace pcl
{
namespace visualization
class PCL_EXPORTS AreaPickingEvent
{
public:
- AreaPickingEvent (int nb_points, const std::vector<int>& indices)
+ AreaPickingEvent (int nb_points, const pcl::Indices& indices)
: nb_points_ (nb_points)
, indices_ (indices)
{}
* \return true, if the area selected by the user contains points, false otherwise
*/
inline bool
- getPointsIndices (std::vector<int>& indices) const
+ getPointsIndices (pcl::Indices& indices) const
{
if (nb_points_ <= 0)
return (false);
private:
int nb_points_;
- std::vector<int> indices_;
+ pcl::Indices indices_;
};
} //namespace visualization
} //namespace pcl
# pragma GCC system_header
#endif
-//https://bugreports.qt-project.org/browse/QTBUG-22829
-#ifndef Q_MOC_RUN
#include <boost/shared_array.hpp>
#define BOOST_PARAMETER_MAX_ARITY 7
#include <boost/signals2.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/foreach.hpp>
-#ifndef Q_MOC_RUN
#include <boost/date_time/posix_time/posix_time.hpp>
-#endif
#include <boost/filesystem.hpp>
-#endif
#pragma once
-#include <pcl/visualization/boost.h>
-#include <pcl/visualization/point_cloud_handlers.h>
+#include <pcl/visualization/point_cloud_geometry_handlers.h> // for PointCloudGeometryHandler
+#include <pcl/visualization/point_cloud_color_handlers.h> // for PointCloudColorHandler
#include <pcl/PCLPointCloud2.h>
#include <vtkLODActor.h>
#include <vtkSmartPointer.h>
+#include <vtkIdTypeArray.h>
-#include <map>
#include <unordered_map>
#include <vector>
#pragma GCC system_header
#endif
+#include <Eigen/Core> // for Matrix, ...
+
#include <pcl/pcl_macros.h>
-#include <pcl/visualization/eigen.h>
#include <vtkMatrix4x4.h>
#include <vtkSmartPointer.h>
#include <vtkLookupTable.h>
#include <pcl/pcl_config.h>
#include <pcl/pcl_macros.h>
-#include <iostream>
-#include <cmath>
#include <limits>
namespace pcl
#pragma once
#include <pcl/visualization/common/actor_map.h>
-#include <pcl/console/print.h>
class vtkPolyData;
* \ingroup visualization
*/
PCL_EXPORTS void
- getCorrespondingPointCloud (vtkPolyData *src, const pcl::PointCloud<pcl::PointXYZ> &tgt, std::vector<int> &indices);
+ getCorrespondingPointCloud (vtkPolyData *src, const pcl::PointCloud<pcl::PointXYZ> &tgt, pcl::Indices &indices);
/** \brief Saves the vtk-formatted Point Cloud data into a set of files, based on whether
* the data comes from previously loaded PCD files. The PCD files are matched using the
#include <map>
#include <string>
+#include <vtkXYPlotActor.h>
+
template <typename T> class vtkSmartPointer;
-class vtkXYPlotActor;
class vtkRenderer;
class vtkRenderWindow;
class vtkRenderWindowInteractor;
#pragma once
#include <pcl/point_cloud.h>
-#include <pcl/visualization/eigen.h>
template <typename T> class vtkSmartPointer;
class vtkDataSet;
createCube (double x_min, double x_max,
double y_min, double y_max,
double z_min, double z_max);
+
+ /** \brief Create an ellipsoid shape from the given parameters.
+ *
+ * \param[in] transform a transformation to apply to the ellipsoid from 0,0,0
+ * \param[in] radius_x the ellipsoid's radius along its local x-axis
+ * \param[in] radius_y the ellipsoid's radius along its local y-axis
+ * \param[in] radius_z the ellipsoid's radius along its local z-axis
+ * \ingroup visualization
+ */
+ PCL_EXPORTS vtkSmartPointer<vtkDataSet>
+ createEllipsoid (const Eigen::Isometry3d &transform,
+ double radius_x, double radius_y, double radius_z);
/** \brief Allocate a new unstructured grid smartpointer. For internal use only.
* \param[out] polydata the resultant unstructured grid.
#if defined __GNUC__
# pragma GCC system_header
#endif
+PCL_DEPRECATED_HEADER(1, 15, "Please include the needed Eigen headers directly.")
#include <Eigen/Geometry>
#include <Eigen/Dense>
template <typename PointT> bool
addFeatureHistogram (const pcl::PointCloud<PointT> &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id = "cloud", int win_width = 640, int win_height = 200);
/** \brief Add a histogram feature to screen as a separate window.
bool
addFeatureHistogram (const pcl::PCLPointCloud2 &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id = "cloud", int win_width = 640, int win_height = 200);
/** \brief Update a histogram feature that is already on screen, with a cloud containing a single histogram.
*/
template <typename PointT> bool
updateFeatureHistogram (const pcl::PointCloud<PointT> &cloud, const std::string &field_name,
- const int index, const std::string &id = "cloud");
+ const pcl::index_t index, const std::string &id = "cloud");
/** \brief Update a histogram feature that is already on screen, with a cloud containing a single histogram.
*/
bool
updateFeatureHistogram (const pcl::PCLPointCloud2 &cloud,
- const std::string &field_name, const int index,
+ const std::string &field_name, const pcl::index_t index,
const std::string &id = "cloud");
PCLHistogramVisualizer::addFeatureHistogram (
const pcl::PointCloud<PointT> &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id, int win_width, int win_height)
{
if (index < 0 || index >= cloud.size ())
// Parse the cloud data and store it in the array
double xy[2];
- for (std::uint32_t d = 0; d < fields[field_idx].count; ++d)
+ for (uindex_t d = 0; d < fields[field_idx].count; ++d)
{
xy[0] = d;
//xy[1] = cloud[index].histogram[d];
template <typename PointT> bool
PCLHistogramVisualizer::updateFeatureHistogram (
- const pcl::PointCloud<PointT> &cloud, const std::string &field_name, const int index,
+ const pcl::PointCloud<PointT> &cloud, const std::string &field_name, const pcl::index_t index,
const std::string &id)
{
if (index < 0 || index >= cloud.size ())
PCLPlotter::addFeatureHistogram (
const pcl::PointCloud<PointT> &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id, int win_width, int win_height)
{
if (index < 0 || index >= cloud.size ())
#include <vtkLODActor.h>
#include <vtkLineSource.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/visualization/common/shapes.h>
// Support for VTK 7.1 upwards
points->SetNumberOfPoints (nr_points);
}
+#ifdef VTK_CELL_ARRAY_V2
+  // TODO: Remove when VTK 6, 7 and 8 are no longer supported
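+  // In the VTK 9 cell-array layout (VTK_CELL_ARRAY_V2) the vertex cells are rebuilt
+  // through the public insert/resize API instead of rewriting the underlying id array,
+  // so the initcells cache is not used on this path.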
+ pcl::utils::ignore(initcells);
+
+ auto numOfCells = vertices->GetNumberOfCells();
+
+  // If we have fewer cells than points, add new cells.
+ if (numOfCells < nr_points)
+ {
+ for (int i = numOfCells; i < nr_points; i++)
+ {
+ vertices->InsertNextCell(1);
+ vertices->InsertCellPoint(i);
+ }
+ }
+  // If we have more cells than points, shrink the cell count (doesn't free the excess memory)
+ else if (numOfCells > nr_points)
+ {
+ vertices->ResizeExact(nr_points, nr_points);
+ }
+
+ polydata->SetPoints(points);
+ polydata->SetVerts(vertices);
+
+#else
vtkSmartPointer<vtkIdTypeArray> cells = vertices->GetData ();
updateCells (cells, initcells, nr_points);
// Set the cells and the vertices
vertices->SetCells (nr_points, cells);
+
+  // Set the cell count explicitly, since the array is not modified in a way that lets the call above update it. See #4001 and #3452
+ vertices->SetNumberOfCells(nr_points);
+#endif
}
//////////////////////////////////////////////////////////////////////////////////////////////
if (!vertices)
vertices = vtkSmartPointer<vtkCellArray>::New ();
+#ifdef VTK_CELL_ARRAY_V2
+  // TODO: Remove when VTK 6, 7 and 8 are no longer supported
+ pcl::utils::ignore(initcells);
+
+ auto numOfCells = vertices->GetNumberOfCells();
+
+  // If we have fewer cells than points, add new cells.
+ if (numOfCells < nr_points)
+ {
+ for (int i = numOfCells; i < nr_points; i++)
+ {
+ vertices->InsertNextCell(1);
+ vertices->InsertCellPoint(i);
+ }
+ }
+  // If we have more cells than points, shrink the cell count (doesn't free the excess memory)
+ else if (numOfCells > nr_points)
+ {
+ vertices->ResizeExact(nr_points, nr_points);
+ }
+
+ polydata->SetPoints(points);
+ polydata->SetVerts(vertices);
+
+#else
vtkSmartPointer<vtkIdTypeArray> cells = vertices->GetData ();
updateCells (cells, initcells, nr_points);
// Set the cells and the vertices
vertices->SetCells (nr_points, cells);
+#endif
}
////////////////////////////////////////////////////////////////////////////////////////////
// If there is no custom viewport and the viewport number is not 0, exit
if (rens_->GetNumberOfItems () <= viewport)
{
- PCL_ERROR ("[addText3D] The viewport [%d] doesn't exist (id <%s>)! ",
+    PCL_ERROR ("[addText3D] The viewport [%d] doesn't exist (id <%s>)!\n",
viewport,
tid.c_str ());
return false;
const std::string uid = tid + std::string (i, '*');
if (contains (uid))
{
- PCL_ERROR ( "[addText3D] The id <%s> already exists in viewport [%d]! "
+    PCL_ERROR ("[addText3D] The id <%s> already exists in viewport [%d]!\n"
"Please choose a different id and retry.\n",
tid.c_str (),
i);
// If there is no custom viewport and the viewport number is not 0, exit
if (rens_->GetNumberOfItems () <= viewport)
{
- PCL_ERROR ("[addText3D] The viewport [%d] doesn't exist (id <%s>)! ",
+ PCL_ERROR ("[addText3D] The viewport [%d] doesn't exist (id <%s>)!\n",
viewport,
tid.c_str ());
return false;
// Draw lines between the best corresponding points
for (std::size_t i = 0; i < correspondences.size (); i += nth, ++j)
{
- if (correspondences[i].index_match == -1)
+ if (correspondences[i].index_match == UNAVAILABLE)
{
PCL_WARN ("[addCorrespondences] No valid index_match for correspondence %d\n", i);
continue;
vtkSmartPointer<vtkIdTypeArray> initcells;
// Convert the PointCloud to VTK PolyData
convertPointCloudToVTKPolyData<PointT> (geometry_handler, polydata, initcells);
- // use the given geometry handler
// Get the colors from the handler
bool has_colors = false;
vtkSmartPointer<vtkPolyData> polydata = reinterpret_cast<vtkPolyDataMapper*>(am_it->second.actor->GetMapper ())->GetInput ();
if (!polydata)
return (false);
- vtkSmartPointer<vtkCellArray> vertices = polydata->GetVerts ();
- vtkSmartPointer<vtkPoints> points = polydata->GetPoints ();
- // Copy the new point array in
- vtkIdType nr_points = cloud->size ();
- points->SetNumberOfPoints (nr_points);
-
- // Get a pointer to the beginning of the data array
- float *data = (static_cast<vtkFloatArray*> (points->GetData ()))->GetPointer (0);
-
- vtkIdType pts = 0;
- // If the dataset is dense (no NaNs)
- if (cloud->is_dense)
- {
- for (vtkIdType i = 0; i < nr_points; ++i, pts += 3)
- std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[pts]);
- }
- else
- {
- vtkIdType j = 0; // true point index
- for (vtkIdType i = 0; i < nr_points; ++i)
- {
- // Check if the point is invalid
- if (!isFinite ((*cloud)[i]))
- continue;
- std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[pts]);
- pts += 3;
- j++;
- }
- nr_points = j;
- points->SetNumberOfPoints (nr_points);
- }
- vtkSmartPointer<vtkIdTypeArray> cells = vertices->GetData ();
- updateCells (cells, am_it->second.cells, nr_points);
-
- // Set the cells and the vertices
- vertices->SetCells (nr_points, cells);
- // Set the cell count explicitly as the array doesn't get modified enough so the above method updates accordingly. See #4001 and #3452
- vertices->SetNumberOfCells(nr_points);
+ convertPointCloudToVTKPolyData<PointT>(cloud, polydata, am_it->second.cells);
// Get the colors from the handler
bool has_colors = false;
{
// Create polys from polyMesh.polygons
vtkSmartPointer<vtkCellArray> cell_array = vtkSmartPointer<vtkCellArray>::New ();
- vtkIdType *cell = cell_array->WritePointer (vertices.size (), vertices.size () * (max_size_of_polygon + 1));
- int idx = 0;
- if (!lookup.empty ())
- {
- for (std::size_t i = 0; i < vertices.size (); ++i, ++idx)
- {
- std::size_t n_points = vertices[i].vertices.size ();
- *cell++ = n_points;
- //cell_array->InsertNextCell (n_points);
- for (std::size_t j = 0; j < n_points; j++, ++idx)
- *cell++ = lookup[vertices[i].vertices[j]];
- //cell_array->InsertCellPoint (lookup[vertices[i].vertices[j]]);
- }
- }
- else
- {
- for (std::size_t i = 0; i < vertices.size (); ++i, ++idx)
- {
- std::size_t n_points = vertices[i].vertices.size ();
- *cell++ = n_points;
- //cell_array->InsertNextCell (n_points);
- for (std::size_t j = 0; j < n_points; j++, ++idx)
- *cell++ = vertices[i].vertices[j];
- //cell_array->InsertCellPoint (vertices[i].vertices[j]);
- }
- }
+
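+      // Build the polygon cells through the shared helper; it returns the number of ids
+      // written into the cell data array, which is used below to trim the array to size.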
+      const auto idx = details::fillCells(lookup, vertices, cell_array, max_size_of_polygon);
+
vtkSmartPointer<vtkPolyData> polydata;
allocVtkPolyData (polydata);
cell_array->GetData ()->SetNumberOfValues (idx);
// Update the cells
cells = vtkSmartPointer<vtkCellArray>::New ();
- vtkIdType *cell = cells->WritePointer (verts.size (), verts.size () * (max_size_of_polygon + 1));
- int idx = 0;
- if (!lookup.empty ())
- {
- for (std::size_t i = 0; i < verts.size (); ++i, ++idx)
- {
- std::size_t n_points = verts[i].vertices.size ();
- *cell++ = n_points;
- for (std::size_t j = 0; j < n_points; j++, cell++, ++idx)
- *cell = lookup[verts[i].vertices[j]];
- }
- }
- else
- {
- for (std::size_t i = 0; i < verts.size (); ++i, ++idx)
- {
- std::size_t n_points = verts[i].vertices.size ();
- *cell++ = n_points;
- for (std::size_t j = 0; j < n_points; j++, cell++, ++idx)
- *cell = verts[i].vertices[j];
- }
- }
+
+ const auto idx = details::fillCells(lookup, verts, cells, max_size_of_polygon);
+
cells->GetData ()->SetNumberOfValues (idx);
cells->Squeeze ();
  // Set the vertices
#include <pcl/pcl_macros.h>
#include <pcl/common/colors.h>
+#include <pcl/common/io.h> // for getFieldIndex
#include <pcl/common/point_tests.h> // for pcl::isFinite
std::size_t correspondences_new_size = cloud_intermediate_indices_.size ();
- std::stringstream stream_;
- stream_ << "Random -> correspondences " << correspondences_new_size;
+ const std::string correspondences_text = "Random -> correspondences " + std::to_string(correspondences_new_size);
viewer_->removeShape ("correspondences_size", 0);
- viewer_->addText (stream_.str(), 10, 70, 0.0, 1.0, 0.0, "correspondences_size", v2);
+ viewer_->addText (correspondences_text, 10, 70, 0.0, 1.0, 0.0, "correspondences_size", v2);
  // Display the entire set of correspondence lines if no maximum number of displayed correspondences is set
if( ( 0 < maximum_displayed_correspondences_ ) &&
template<typename PointSource, typename PointTarget> void
RegistrationVisualizer<PointSource, PointTarget>::updateIntermediateCloud (
const pcl::PointCloud<PointSource> &cloud_src,
- const std::vector<int> &indices_src,
+ const pcl::Indices &indices_src,
const pcl::PointCloud<PointTarget> &cloud_tgt,
- const std::vector<int> &indices_tgt)
+ const pcl::Indices &indices_tgt)
{
// Lock local buffers
visualizer_updating_mutex_.lock ();
#pragma once
-#include <pcl/console/print.h>
#include <pcl/visualization/common/actor_map.h>
#include <pcl/visualization/common/ren_win_interact_map.h>
#include <pcl/visualization/keyboard_event.h>
#include <pcl/visualization/mouse_event.h>
#include <pcl/visualization/point_picking_event.h>
#include <pcl/visualization/area_picking_event.h>
-#ifndef Q_MOC_RUN
#include <boost/signals2/signal.hpp>
-#endif
#include <vtkInteractorStyleRubberBandPick.h>
+#include <vtkRendererCollection.h>
+#include <vtkRenderWindow.h>
class vtkRendererCollection;
class vtkLegendScaleActor;
#pragma once
-#include <iostream>
-#include <map>
#include <vector>
#include <pcl/pcl_exports.h>
#include <vtkRenderer.h>
#include <iostream>
#include <vector>
#include <utility>
-#include <cfloat>
#include <pcl/visualization/common/common.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/io.h>
-class vtkRenderWindow;
-class vtkRenderWindowInteractor;
-class vtkContextView;
-class vtkChartXY;
-class vtkColorSeries;
-
+#include <vtkContextView.h>
+#include <vtkChartXY.h>
+#include <vtkColorSeries.h>
#include <vtkSmartPointer.h>
#include <vtkCommand.h>
#include <vtkChart.h>
+class vtkRenderWindow;
+class vtkRenderWindowInteractor;
+
namespace pcl
{
namespace visualization
template <typename PointT> bool
addFeatureHistogram (const pcl::PointCloud<PointT> &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id = "cloud", int win_width = 640, int win_height = 200);
/** \brief Add a histogram feature to screen as a separate window.
bool
addFeatureHistogram (const pcl::PCLPointCloud2 &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id = "cloud", int win_width = 640, int win_height = 200);
/** \brief Draws all the plots added by addPlotData() or addHistogramData() till now */
#include <pcl/visualization/area_picking_event.h>
#include <pcl/visualization/interactor_style.h>
+#include <vtkOrientationMarkerWidget.h>
+#include <vtkRenderWindowInteractor.h>
+
// VTK includes
class vtkPolyData;
class vtkTextActor;
class vtkRenderWindow;
-class vtkOrientationMarkerWidget;
class vtkAppendPolyData;
class vtkRenderWindow;
-class vtkRenderWindowInteractor;
class vtkTransform;
class vtkInteractorStyle;
class vtkLODActor;
class vtkActor;
class vtkDataSet;
class vtkUnstructuredGrid;
+class vtkCellArray;
namespace pcl
{
namespace visualization
{
+ namespace details
+ {
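+      /** \brief Shared helper that fills \a cell_array with one polygon cell per entry of
+        * \a vertices, remapping point ids through \a lookup when it is non-empty.
+        * \return the number of ids written into the cell data array. For internal use only. */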
+      PCL_EXPORTS vtkIdType
+      fillCells(std::vector<int>& lookup,
+                const std::vector<pcl::Vertices>& vertices,
+                vtkSmartPointer<vtkCellArray> cell_array,
+                int max_size_of_polygon);
+ }
+
/** \brief PCL Visualizer main class.
* \author Radu B. Rusu
* \ingroup visualization
addCube (float x_min, float x_max, float y_min, float y_max, float z_min, float z_max,
double r = 1.0, double g = 1.0, double b = 1.0, const std::string &id = "cube", int viewport = 0);
+ /** \brief Add an ellipsoid from the given parameters
+ * \param[in] transform a transformation to apply to the ellipsoid from 0,0,0
+ * \param[in] radius_x the ellipsoid's radius along its local x-axis
+ * \param[in] radius_y the ellipsoid's radius along its local y-axis
+ * \param[in] radius_z the ellipsoid's radius along its local z-axis
+ * \param[in] id the ellipsoid id/name (default: "ellipsoid")
+ * \param[in] viewport (optional) the id of the new viewport (default: 0)
+ */
+ bool
+ addEllipsoid (const Eigen::Isometry3d &transform,
+ double radius_x, double radius_y, double radius_z,
+ const std::string &id = "ellipsoid",
+ int viewport = 0);
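+    // Hypothetical usage sketch: an axis-aligned ellipsoid with radii 1/2/3 centred at the origin:
+    //   viewer.addEllipsoid (Eigen::Isometry3d::Identity (), 1.0, 2.0, 3.0, "ellipsoid1");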
+
/** \brief Changes the visual representation for all actors to surface representation. */
void
setRepresentationToSurfaceForAllActors ();
* \param[in] view_z the z component of the view point of the camera
* \param[in] up_x the x component of the view up direction of the camera
* \param[in] up_y the y component of the view up direction of the camera
- * \param[in] up_z the y component of the view up direction of the camera
+ * \param[in] up_z the z component of the view up direction of the camera
* \param[in] viewport the viewport to modify camera of (0 modifies all cameras)
*/
void
vtkSmartPointer<vtkPolyData> &polydata,
vtkSmartPointer<vtkIdTypeArray> &initcells);
- /** \brief Converts a PCL templated PointCloud object to a vtk polydata object.
+ /** \brief Converts a PCL object to a vtk polydata object.
* \param[in] geometry_handler the geometry handler object used to extract the XYZ data
* \param[out] polydata the resultant polydata containing the cloud
* \param[out] initcells a list of cell indices used for the conversion. This can be set once and then passed
// PCL includes
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <pcl/common/io.h>
+#include <pcl/PCLPointCloud2.h> // for PCLPointCloud2
#include <pcl/visualization/common/common.h>
// VTK includes
#include <vtkSmartPointer.h>
getFieldName () const = 0;
/** Obtain the actual color for the input dataset as a VTK data array.
- * Deriving handlers should override this method. The default implementation is
- * provided only for backwards compatibility with handlers that were written
- * before PCL 1.10.0 and will be removed in future.
+ * Deriving handlers should override this method.
* \return smart pointer to VTK array if the operation was successful (the
* handler is capable and the input cloud was given), a null pointer otherwise */
virtual vtkSmartPointer<vtkDataArray>
- getColor () const {
- vtkSmartPointer<vtkDataArray> scalars;
- getColor (scalars);
- return scalars;
- }
-
- /** Obtain the actual color for the input dataset as a VTK data array.
- * This virtual method should not be overriden or used. The default implementation
- * is provided only for backwards compatibility with handlers that were written
- * before PCL 1.10.0 and will be removed in future. */
- PCL_DEPRECATED(1, 12, "use getColor() without parameters instead")
- virtual bool
- getColor (vtkSmartPointer<vtkDataArray> &scalars) const {
- scalars = getColor ();
- return scalars.Get() != nullptr;
- }
+ getColor () const = 0;
/** \brief Set the input cloud to be used.
* \param[in] cloud the input cloud to be used by the handler
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<PointT>::getColor;
-
protected:
// Members derived from the base class
using PointCloudColorHandler<PointT>::cloud_;
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<PointT>::getColor;
-
protected:
// Members derived from the base class
using PointCloudColorHandler<PointT>::cloud_;
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<PointT>::getColor;
-
/** \brief Set the input cloud to be used.
* \param[in] cloud the input cloud to be used by the handler
*/
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<PointT>::getColor;
-
protected:
/** \brief Class getName method. */
virtual std::string
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<PointT>::getColor;
-
/** \brief Set the input cloud to be used.
* \param[in] cloud the input cloud to be used by the handler
*/
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<PointT>::getColor;
-
/** \brief Set the input cloud to be used.
* \param[in] cloud the input cloud to be used by the handler
*/
* \return smart pointer to VTK array if the operation was successful (the
* handler is capable and the input cloud was given), a null pointer otherwise */
virtual vtkSmartPointer<vtkDataArray>
- getColor () const {
- vtkSmartPointer<vtkDataArray> scalars;
- getColor (scalars);
- return scalars;
- }
-
- /** Obtain the actual color for the input dataset as a VTK data array.
- * This virtual method should not be overriden or used. The default implementation
- * is provided only for backwards compatibility with handlers that were written
- * before PCL 1.10.0 and will be removed in future. */
- PCL_DEPRECATED(1, 12, "use getColor() without parameters instead")
- virtual bool
- getColor (vtkSmartPointer<vtkDataArray> &scalars) const {
- scalars = getColor ();
- return scalars.Get() != nullptr;
- }
+ getColor() const = 0;
/** \brief Set the input cloud to be used.
* \param[in] cloud the input cloud to be used by the handler
vtkSmartPointer<vtkDataArray>
getColor () const override;
-
- using PointCloudColorHandler<pcl::PCLPointCloud2>::getColor;
};
//////////////////////////////////////////////////////////////////////////////////////
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<pcl::PCLPointCloud2>::getColor;
-
protected:
/** \brief Internal R, G, B holding the values given by the user. */
double r_, g_, b_;
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<pcl::PCLPointCloud2>::getColor;
-
protected:
/** \brief Get the name of the class. */
virtual std::string
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<pcl::PCLPointCloud2>::getColor;
-
protected:
/** \brief Get the name of the class. */
virtual std::string
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<pcl::PCLPointCloud2>::getColor;
-
protected:
/** \brief Get the name of the class. */
virtual std::string
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<pcl::PCLPointCloud2>::getColor;
-
protected:
/** \brief Get the name of the class. */
virtual std::string
vtkSmartPointer<vtkDataArray>
getColor () const override;
- using PointCloudColorHandler<pcl::PCLPointCloud2>::getColor;
-
protected:
/** \brief Get the name of the class. */
virtual std::string
#endif
// PCL includes
+#include <pcl/pcl_base.h> // for UNAVAILABLE
#include <pcl/point_cloud.h>
#include <pcl/common/io.h>
// VTK includes
#pragma once
#include <pcl/pcl_macros.h>
-#include <vector>
+#include <pcl/types.h> // for pcl::Indices
#include <vtkCommand.h>
class vtkRenderWindowInteractor;
performSinglePick (vtkRenderWindowInteractor *iren, float &x, float &y, float &z);
int
- performAreaPick (vtkRenderWindowInteractor *iren, std::vector<int> &indices) const;
+ performAreaPick (vtkRenderWindowInteractor *iren, pcl::Indices &indices) const;
private:
float x_, y_, z_;
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2020-, Open Perception
+ *
+ * All rights reserved
+ */
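+
+// Thin compatibility layer for the Qt VTK widget: for VTK > 8 the widget is
+// QVTKOpenGLNativeWidget (interactor()/renderWindow()/setRenderWindow()), for older VTK
+// it is QVTKWidget (GetInteractor()/GetRenderWindow()/SetRenderWindow()). The helpers
+// below hide that difference behind a single PCLQVTKWidget alias.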
+#include <pcl/pcl_macros.h>
+#include <pcl/pcl_config.h>
+
+#if HAVE_QVTK
+#include <vtkVersion.h>
+#include <vtkRenderWindow.h>
+
+#if VTK_MAJOR_VERSION > 8
+ #include <QVTKOpenGLNativeWidget.h>
+ using PCLQVTKWidget = QVTKOpenGLNativeWidget;
+#else
+ #include <QVTKWidget.h>
+ using PCLQVTKWidget = QVTKWidget;
+#endif // VTK_MAJOR_VERSION > 8
+
+
+inline auto PCL_EXPORTS getInteractorCompat(PCLQVTKWidget& qvtk) {
+#if VTK_MAJOR_VERSION > 8
+ return qvtk.interactor();
+#else
+ return qvtk.GetInteractor();
+#endif // VTK_MAJOR_VERSION > 8
+}
+
+inline auto PCL_EXPORTS getRenderWindowCompat(PCLQVTKWidget& qvtk) {
+#if VTK_MAJOR_VERSION > 8
+ return qvtk.renderWindow();
+#else
+ return qvtk.GetRenderWindow();
+#endif // VTK_MAJOR_VERSION > 8
+}
+
+inline auto PCL_EXPORTS setRenderWindowCompat(PCLQVTKWidget& qvtk, vtkRenderWindow& window) {
+#if VTK_MAJOR_VERSION > 8
+ return qvtk.setRenderWindow(&window);
+#else
+ return qvtk.SetRenderWindow(&window);
+#endif // VTK_MAJOR_VERSION > 8
+}
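+
+// Minimal usage sketch (illustrative names only: a "qvtk" widget and a
+// pcl::visualization::PCLVisualizer "viewer" created with its own render window):
+//   setRenderWindowCompat (qvtk, *viewer->getRenderWindow ());
+//   viewer->setupInteractor (getInteractorCompat (qvtk), getRenderWindowCompat (qvtk));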
+
+#else
+#error PCL is not compiled with QVTK.
+#endif
// Create the local callback function and bind it to the local function responsible for updating
// the local buffers
- update_visualizer_ = [this] (const pcl::PointCloud<PointSource>& cloud_src, const std::vector<int>& indices_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt, const std::vector<int>& indices_tgt)
+ update_visualizer_ = [this] (const pcl::PointCloud<PointSource>& cloud_src, const pcl::Indices& indices_src,
+ const pcl::PointCloud<PointTarget>& cloud_tgt, const pcl::Indices& indices_tgt)
{
updateIntermediateCloud (cloud_src, indices_src, cloud_tgt, indices_tgt);
};
* \param indices_tgt represents the indices of the target points used for the estimation of rigid transformation
*/
void
- updateIntermediateCloud (const pcl::PointCloud<PointSource> &cloud_src, const std::vector<int> &indices_src,
- const pcl::PointCloud<PointTarget> &cloud_tgt, const std::vector<int> &indices_tgt);
+ updateIntermediateCloud (const pcl::PointCloud<PointSource> &cloud_src, const pcl::Indices &indices_src,
+ const pcl::PointCloud<PointTarget> &cloud_tgt, const pcl::Indices &indices_tgt);
/** \brief Set maximum number of correspondence lines which will be rendered. */
inline void
inline std::string
getIndexedName (std::string &root_name, std::size_t &id)
{
- std::stringstream id_stream_;
- id_stream_ << id;
- std::string indexed_name_ = root_name + id_stream_.str ();
- return indexed_name_;
+ return root_name + std::to_string(id);
}
/** \brief The registration viewer. */
/** \brief Callback function linked to pcl::Registration::update_visualizer_ */
std::function<void
- (const pcl::PointCloud<PointSource> &cloud_src, const std::vector<int> &indices_src, const pcl::PointCloud<
- PointTarget> &cloud_tgt, const std::vector<int> &indices_tgt)> update_visualizer_;
+ (const pcl::PointCloud<PointSource> &cloud_src, const pcl::Indices &indices_src, const pcl::PointCloud<
+ PointTarget> &cloud_tgt, const pcl::Indices &indices_tgt)> update_visualizer_;
/** \brief Updates source and target point clouds only for the first update call. */
bool first_update_flag_;
pcl::PointCloud<PointSource> cloud_intermediate_;
/** \brief The indices of intermediate points used for computation of rigid transformation. */
- std::vector<int> cloud_intermediate_indices_;
+ pcl::Indices cloud_intermediate_indices_;
/** \brief The indices of target points used for computation of rigid transformation. */
- std::vector<int> cloud_target_indices_;
+ pcl::Indices cloud_target_indices_;
/** \brief The maximum number of displayed correspondences. */
std::size_t maximum_displayed_correspondences_;
void
initValuesAndVisualization ()
{
- cloud_.points.resize(1);
+ cloud_.resize(1);
PCL_WARN("Setting buffer size to %d entries.\n", nb_values_);
values_.resize(nb_values_);
/*
- * Software License Agreement (BSD License)
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2009-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
+ * Copyright (c) 2020-, Open Perception
*
+ * All rights reserved
*/
-#pragma once
-
-#if defined __GNUC__
-#pragma GCC system_header
-#ifdef __DEPRECATED
-#undef __DEPRECATED
-#define __DEPRECATED_DISABLED__
-#endif
-#endif
-
-#include <vtkVersion.h>
-#include <vtkAppendPolyData.h>
-#include <vtkAssemblyPath.h>
-#include <vtkAxesActor.h>
-#include <vtkActor.h>
-#include <vtkBoxRepresentation.h>
-#include <vtkBoxWidget.h>
-#include <vtkBoxWidget2.h>
-#include <vtkCellData.h>
-#include <vtkMath.h>
-#include <vtkLoopSubdivisionFilter.h>
-#include <vtkLineSource.h>
-#include <vtkLegendScaleActor.h>
-#include <vtkLightKit.h>
-#include <vtkPlatonicSolidSource.h>
-#include <vtkPropPicker.h>
-#include <vtkGeneralTransform.h>
-#include <vtkSmartPointer.h>
-#include <vtkDataSet.h>
-#include <vtkDataSetSurfaceFilter.h>
-#include <vtkExecutive.h>
-#include <vtkPolygon.h>
-#include <vtkPointPicker.h>
-#include <vtkUnstructuredGrid.h>
-#include <vtkConeSource.h>
-#include <vtkDiskSource.h>
-#include <vtkPlaneSource.h>
-#include <vtkSphereSource.h>
-#include <vtkIdentityTransform.h>
-#include <vtkTransform.h>
-#include <vtkTransformPolyDataFilter.h>
-#include <vtkTubeFilter.h>
-#include <vtkCubeSource.h>
-#include <vtkAxes.h>
-#include <vtkFloatArray.h>
-#include <vtkPointData.h>
-#include <vtkPolyData.h>
-#include <vtkPolyDataReader.h>
-#include <vtkPolyDataMapper.h>
-#include <vtkDataSetMapper.h>
-#include <vtkCellArray.h>
-#include <vtkCommand.h>
-#include <vtkCellLocator.h>
-#include <vtkPLYReader.h>
-#include <vtkTransformFilter.h>
-#include <vtkPolyLine.h>
-#include <vtkVectorText.h>
-#include <vtkFollower.h>
-#include <vtkCallbackCommand.h>
-#include <vtkInteractorStyle.h>
-#include <vtkInformationVector.h>
-#include <vtkDataArray.h>
-#include <vtkUnsignedCharArray.h>
-#include <vtkPoints.h>
-#include <vtkRendererCollection.h>
-#include <vtkPNGWriter.h>
-#include <vtkWindowToImageFilter.h>
-#include <vtkInteractorStyleTrackballCamera.h>
-#include <vtkProperty.h>
-#include <vtkCamera.h>
-#include <vtkObjectFactory.h>
-#include <vtkScalarBarActor.h>
-#include <vtkScalarsToColors.h>
-#include <vtkClipPolyData.h>
-#include <vtkPlanes.h>
-#include <vtkImageImport.h>
-#include <vtkImageViewer.h>
-#include <vtkInteractorStyleImage.h>
-#include <vtkImageFlip.h>
-#include <vtkTIFFWriter.h>
-#include <vtkBMPWriter.h>
-#include <vtkJPEGWriter.h>
-#include <vtkImageViewer2.h>
-#include <vtkRenderWindow.h>
-#include <vtkXYPlotActor.h>
-#include <vtkTextProperty.h>
-#include <vtkProperty2D.h>
-#include <vtkFieldData.h>
-#include <vtkDoubleArray.h>
-#include <vtkLODActor.h>
-#include <vtkPolyDataWriter.h>
-#include <vtkTextActor.h>
-#include <vtkCleanPolyData.h>
-#include <vtkRenderer.h>
-#include <vtkObject.h>
-#include <vtkOrientationMarkerWidget.h>
-#include <vtkImageReslice.h>
-#include <vtkImageChangeInformation.h>
-#include <vtkImageCanvasSource2D.h>
-#include <vtkImageBlend.h>
-#include <vtkImageStencilData.h>
-#include <vtkRenderWindowInteractor.h>
-#include <vtkChartXY.h>
-#include <vtkPlot.h>
-#include <vtkTable.h>
-#include <vtkContextView.h>
-#include <vtkContextScene.h>
-#include <vtkColorSeries.h>
-#include <vtkAxis.h>
-#include <vtkSelection.h>
-
-#include <vtkHardwareSelector.h>
-
-#include <vtkTriangle.h>
-#include <vtkWorldPointPicker.h>
-
-#include <vtkInteractorStyleRubberBandPick.h>
-#include <vtkInteractorStyleTrackballActor.h>
-#include <vtkAreaPicker.h>
-#include <vtkExtractGeometry.h>
-#include <vtkExtractPolyDataGeometry.h>
-#include <vtkVertexGlyphFilter.h>
-#include <vtkIdFilter.h>
-#include <vtkIdTypeArray.h>
-#include <vtkImageReader2Factory.h>
-#include <vtkImageReader2.h>
-#include <vtkImageData.h>
-
-#if defined __GNUC__ && defined __DEPRECATED_DISABLED__
-#define __DEPRECATED
-#undef __DEPRECATED_DISABLED__
-#endif
+PCL_DEPRECATED_HEADER(1, 14, "Use required vtk includes instead.")
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2020-, Open Perception
+ *
+ * All rights reserved
+ */
+
+#include <vtkCellArray.h>
+
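+// VTK 9 (which defines VTK_CELL_ARRAY_V2) hands out cell point ids from
+// vtkCellArray::GetNextCell as const vtkIdType*, while older VTK returns a
+// mutable vtkIdType*; this alias lets traversal code compile against both.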
+#ifdef VTK_CELL_ARRAY_V2
+ using vtkCellPtsPtr = vtkIdType const*;
+#else
+ using vtkCellPtsPtr = vtkIdType*;
+#endif
+
#pragma once
-#include <vector>
-
#include "vtkObject.h"
#include "vtkWeakPointer.h"
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/visualization/boost.h>
#include <pcl/memory.h>
+#include <vtkOrientationMarkerWidget.h>
+#include <vtkRenderWindowInteractor.h>
+
#include <mutex>
#include <thread>
#include <vtkCamera.h>
#include <vtkRenderWindow.h>
+#include <Eigen/Geometry> // for cross
+
#include <pcl/point_types.h>
#include <pcl/visualization/common/common.h>
#include <pcl/console/print.h>
#include <pcl/visualization/common/io.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/visualization/eigen.h>
#include <pcl/memory.h>
//////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::visualization::getCorrespondingPointCloud (vtkPolyData *src,
const pcl::PointCloud<pcl::PointXYZ> &tgt,
- std::vector<int> &indices)
+ pcl::Indices &indices)
{
  // Iterate through the points and copy the data into a pcl::PointCloud
pcl::PointCloud<pcl::PointXYZ> cloud;
cloud.height = 1; cloud.width = static_cast<std::uint32_t> (src->GetNumberOfPoints ());
cloud.is_dense = false;
- cloud.points.resize (cloud.width * cloud.height);
+ cloud.resize (cloud.width * cloud.height);
for (vtkIdType i = 0; i < src->GetNumberOfPoints (); i++)
{
double p[3];
pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;
kdtree.setInputCloud (make_shared<PointCloud<PointXYZ>> (tgt));
- std::vector<int> nn_indices (1);
+ pcl::Indices nn_indices (1);
std::vector<float> nn_dists (1);
  // For each point on screen, find its corresponding point in the target cloud
for (const auto &point : cloud.points)
pcl::PointCloud<pcl::PointXYZ> cloud_xyz;
pcl::fromPCLPointCloud2 (cloud, cloud_xyz);
// Get the corresponding indices that we need to save from this point cloud
- std::vector<int> indices;
+ pcl::Indices indices;
getCorrespondingPointCloud (cleaner->GetOutput (), cloud_xyz, indices);
// Copy the indices and save the file
pcl::PCLPointCloud2 cloud_out;
pcl::copyPointCloud (cloud, indices, cloud_out);
- std::stringstream ss;
- ss << out_file << i++ << ".pcd";
- pcl::console::print_debug (" Save: %s ... ", ss.str ().c_str ());
- if (pcl::io::savePCDFile (ss.str (), cloud_out, Eigen::Vector4f::Zero (),
+ const std::string out_filename = out_file + std::to_string(i++) + ".pcd";
+ pcl::console::print_debug (" Save: %s ... ", out_filename.c_str ());
+ if (pcl::io::savePCDFile (out_filename, cloud_out, Eigen::Vector4f::Zero (),
Eigen::Quaternionf::Identity (), true) == -1)
{
pcl::console::print_error (stdout, "[failed]\n");
#include <vtkDiskSource.h>
#include <vtkPlaneSource.h>
#include <vtkCubeSource.h>
+#include <vtkParametricEllipsoid.h>
+#include <vtkParametricFunctionSource.h>
////////////////////////////////////////////////////////////////////////////////////////////
vtkSmartPointer<vtkDataSet>
return (line->GetOutput ());
}
+//////////////////////////////////////////////////////////////////////////////////////////////
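+// Builds an ellipsoid polydata by evaluating a vtkParametricEllipsoid with the
+// given semi-axes through vtkParametricFunctionSource, then applies `transform`
+// (transposed into VTK's row-major layout) to place it in the scene.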
+vtkSmartPointer<vtkDataSet>
+pcl::visualization::createEllipsoid (const Eigen::Isometry3d &transform,
+ double radius_x, double radius_y, double radius_z)
+{
+ const vtkSmartPointer<vtkTransform> t = vtkSmartPointer<vtkTransform>::New ();
+ const Eigen::Matrix4d trMatrix = transform.matrix ().transpose (); // Eigen is col-major while vtk is row-major, so transpose it.
+ t->SetMatrix (trMatrix.data ());
+
+ vtkSmartPointer<vtkParametricEllipsoid> ellipsoid = vtkSmartPointer<vtkParametricEllipsoid>::New ();
+ ellipsoid->SetXRadius (radius_x);
+ ellipsoid->SetYRadius (radius_y);
+ ellipsoid->SetZRadius (radius_z);
+
+ vtkSmartPointer<vtkParametricFunctionSource> s_ellipsoid = vtkSmartPointer<vtkParametricFunctionSource>::New ();
+ s_ellipsoid->SetParametricFunction (ellipsoid);
+
+ vtkSmartPointer<vtkTransformPolyDataFilter> tf = vtkSmartPointer<vtkTransformPolyDataFilter>::New ();
+ tf->SetTransform (t);
+ tf->SetInputConnection (s_ellipsoid->GetOutputPort ());
+ tf->Update ();
+
+ return (tf->GetOutput ());
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::visualization::allocVtkUnstructuredGrid (vtkSmartPointer<vtkUnstructuredGrid> &polydata)
#include <pcl/visualization/common/common.h>
#include <vtkRenderWindowInteractor.h>
#include <pcl/visualization/histogram_visualizer.h>
-#include <pcl/visualization/boost.h>
#include <vtkVersion.h>
-#include <vtkXYPlotActor.h>
#include <vtkDoubleArray.h>
#include <vtkTextProperty.h>
#include <vtkRenderWindow.h>
int field_idx = pcl::getFieldIndex (cloud, field_name);
if (field_idx == -1)
{
- PCL_ERROR ("[addFeatureHistogram] Invalid field (%s) given!", field_name.c_str ());
+ PCL_ERROR ("[addFeatureHistogram] Invalid field (%s) given!\n", field_name.c_str ());
return (false);
}
// Parse the cloud data and store it in the array
double xy[2];
- for (unsigned int d = 0; d < cloud.fields[field_idx].count; ++d)
+ for (uindex_t d = 0; d < cloud.fields[field_idx].count; ++d)
{
xy[0] = d;
float data;
pcl::visualization::PCLHistogramVisualizer::addFeatureHistogram (
const pcl::PCLPointCloud2 &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id, int win_width, int win_height)
{
- if (index < 0 || index >= static_cast<int> (cloud.width * cloud.height))
+ if (index < 0 || index >= static_cast<pcl::index_t> (cloud.width * cloud.height))
{
PCL_ERROR ("[addFeatureHistogram] Invalid point index (%d) given!\n", index);
return (false);
// Parse the cloud data and store it in the array
double xy[2];
- for (unsigned int d = 0; d < cloud.fields[field_idx].count; ++d)
+ for (uindex_t d = 0; d < cloud.fields[field_idx].count; ++d)
{
xy[0] = d;
float data;
// Parse the cloud data and store it in the array
double xy[2];
- for (unsigned int d = 0; d < cloud.fields[field_idx].count; ++d)
+ for (uindex_t d = 0; d < cloud.fields[field_idx].count; ++d)
{
xy[0] = d;
float data;
pcl::visualization::PCLHistogramVisualizer::updateFeatureHistogram (
const pcl::PCLPointCloud2 &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id)
{
- if (index < 0 || index >= static_cast<int> (cloud.width * cloud.height))
+ if (index < 0 || index >= static_cast<pcl::index_t> (cloud.width * cloud.height))
{
PCL_ERROR ("[updateFeatureHistogram] Invalid point index (%d) given!\n", index);
return (false);
// Parse the cloud data and store it in the array
double xy[2];
- for (unsigned int d = 0; d < cloud.fields[field_idx].count; ++d)
+ for (uindex_t d = 0; d < cloud.fields[field_idx].count; ++d)
{
xy[0] = d;
float data;
*
*/
+#include <fstream>
#include <list>
#include <pcl/common/angles.h>
#include <pcl/visualization/common/io.h>
#include <vtkPointPicker.h>
#include <vtkAreaPicker.h>
+#include <boost/algorithm/string/classification.hpp> // for is_any_of
+#include <boost/algorithm/string/split.hpp> // for split
+#include <boost/filesystem.hpp> // for exists
+
#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
#include <pcl/visualization/vtk/vtkVertexBufferObjectMapper.h>
#endif
{
FindPokedRenderer (Interactor->GetEventPosition ()[0], Interactor->GetEventPosition ()[1]);
- ofstream ofs_cam (file.c_str ());
+ std::ofstream ofs_cam (file.c_str ());
if (!ofs_cam.is_open ())
{
return (false);
if(CurrentRenderer)
CurrentRenderer->ResetCamera ();
else
- PCL_WARN ("no current renderer on the interactor style.");
+ PCL_WARN ("no current renderer on the interactor style.\n");
CurrentRenderer->Render ();
break;
int field_idx = pcl::getFieldIndex (cloud, field_name);
if (field_idx == -1)
{
- PCL_ERROR ("[addFeatureHistogram] Invalid field (%s) given!", field_name.c_str ());
+ PCL_ERROR ("[addFeatureHistogram] Invalid field (%s) given!\n", field_name.c_str ());
return (false);
}
pcl::visualization::PCLPlotter::addFeatureHistogram (
const pcl::PCLPointCloud2 &cloud,
const std::string &field_name,
- const int index,
+ const pcl::index_t index,
const std::string &id, int win_width, int win_height)
{
- if (index < 0 || index >= static_cast<int> (cloud.width * cloud.height))
+ if (index < 0 || index >= static_cast<pcl::index_t> (cloud.width * cloud.height))
{
PCL_ERROR ("[addFeatureHistogram] Invalid point index (%d) given!\n", index);
return (false);
int field_idx = pcl::getFieldIndex (cloud, field_name);
if (field_idx == -1)
{
- PCL_ERROR ("[addFeatureHistogram] Invalid field (%s) given!", field_name.c_str ());
+ PCL_ERROR ("[addFeatureHistogram] Invalid field (%s) given!\n", field_name.c_str ());
return (false);
}
#include <vtkPolyDataNormals.h>
#include <vtkMapper.h>
#include <vtkDataSetMapper.h>
-
+#include <vtkCellArray.h>
#include <vtkHardwareSelector.h>
#include <vtkSelectionNode.h>
#include <vtkSelection.h>
#include <vtkPointPicker.h>
-#include <pcl/visualization/boost.h>
#include <pcl/visualization/vtk/vtkRenderWindowInteractorFix.h>
+#include <pcl/visualization/vtk/pcl_vtk_compatibility.h>
#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
#include <pcl/visualization/vtk/vtkVertexBufferObjectMapper.h>
#include <vtkAxesActor.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkAreaPicker.h>
-#include <vtkXYPlotActor.h>
#include <vtkOpenGLRenderWindow.h>
#include <vtkJPEGReader.h>
#include <vtkBMPReader.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/common/common.h>
#include <pcl/common/time.h>
+#include <boost/version.hpp> // for BOOST_VERSION
#if (BOOST_VERSION >= 106600)
#include <boost/uuid/detail/sha1.hpp>
#else
#include <boost/uuid/sha1.hpp>
#endif
#include <boost/filesystem.hpp>
+#include <boost/algorithm/string.hpp> // for split
#include <pcl/common/utils.h> // pcl::utils::ignore
+#include <numeric> // for std::accumulate, used by details::fillCells below
#include <pcl/console/parse.h>
#undef far
#endif
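+// Shared helper: appends one VTK cell per pcl::Vertices entry to cell_array,
+// remapping point ids through `lookup` when it is non-empty (e.g. after NaN
+// points were compacted out of the cloud), and returns the number of values
+// written (one size entry per cell plus one id per vertex).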
+vtkIdType
+pcl::visualization::details::fillCells(std::vector<int>& lookup, const std::vector<pcl::Vertices>& vertices, vtkSmartPointer<vtkCellArray> cell_array, int max_size_of_polygon)
+{
+#ifdef VTK_CELL_ARRAY_V2
+ pcl::utils::ignore(max_size_of_polygon);
+
+ if (!lookup.empty())
+ {
+ for (const auto& verti : vertices)
+ {
+ std::size_t n_points = verti.vertices.size();
+ cell_array->InsertNextCell(n_points);
+ for (const auto& vertj : verti.vertices)
+ cell_array->InsertCellPoint(lookup[vertj]);
+ }
+ }
+ else
+ {
+ for (const auto& verti : vertices)
+ {
+ std::size_t n_points = verti.vertices.size();
+ cell_array->InsertNextCell(n_points);
+ for (const auto& vertj : verti.vertices)
+ cell_array->InsertCellPoint(vertj);
+ }
+ }
+#else
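+  // Legacy vtkCellArray: write directly into a flat id buffer preallocated for
+  // the worst case (a count entry plus max_size_of_polygon ids per polygon).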
+ vtkIdType* cell = cell_array->WritePointer(vertices.size(), vertices.size() * (max_size_of_polygon + 1));
+
+ if (!lookup.empty())
+ {
+ for (const auto& verti : vertices)
+ {
+ std::size_t n_points = verti.vertices.size();
+ *cell++ = n_points;
+ for (const auto& vertj : verti.vertices)
+ *cell++ = lookup[vertj];
+ }
+ }
+ else
+ {
+ for (const auto& verti : vertices)
+ {
+ std::size_t n_points = verti.vertices.size();
+ *cell++ = n_points;
+ for (const auto& vertj : verti.vertices)
+ *cell++ = vertj;
+ }
+ }
+#endif
+
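+  // Values written to the cell array: one length entry per polygon plus one id
+  // per vertex; the caller uses this to size the underlying id array.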
+ const auto idx = vertices.size() + std::accumulate(vertices.begin(), vertices.end(), static_cast<vtkIdType>(0),
+ [](const auto& sum, const auto& vertex) { return sum + vertex.vertices.size(); });
+
+ return idx;
+}
+
/////////////////////////////////////////////////////////////////////////////////////////////
pcl::visualization::PCLVisualizer::PCLVisualizer (const std::string &name, const bool create_interactor)
: update_fps_ (vtkSmartPointer<FPSCallback>::New ())
void pcl::visualization::PCLVisualizer::setupRenderer (vtkSmartPointer<vtkRenderer> ren)
{
if (!ren)
- PCL_ERROR ("Passed pointer to renderer is null");
+ PCL_ERROR ("Passed pointer to renderer is null\n");
ren->AddObserver (vtkCommand::EndEvent, update_fps_);
// Add it to the list of renderers
void pcl::visualization::PCLVisualizer::setupFPSCallback (const vtkSmartPointer<vtkRenderer>& ren)
{
if (!ren)
- PCL_ERROR ("Passed pointer to renderer is null");
+ PCL_ERROR ("Passed pointer to renderer is null\n");
// FPS callback
vtkSmartPointer<vtkTextActor> txt = vtkSmartPointer<vtkTextActor>::New ();
update_fps_->actor = txt;
void pcl::visualization::PCLVisualizer::setupRenderWindow (const std::string& name)
{
if (!win_)
- PCL_ERROR ("Pointer to render window is null");
+ PCL_ERROR ("Pointer to render window is null\n");
  // This is a temporary measure to disable the display of deprecation warnings
#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
void pcl::visualization::PCLVisualizer::setupStyle ()
{
if (!style_)
- PCL_ERROR ("Pointer to style is null");
+ PCL_ERROR ("Pointer to style is null\n");
  // Set renderer window in case no interactor is created
style_->setRenderWindow (win_);
void pcl::visualization::PCLVisualizer::setDefaultWindowSizeAndPos ()
{
if (!win_)
- PCL_ERROR ("Pointer to render window is null");
+ PCL_ERROR ("Pointer to render window is null\n");
int scr_size_x = win_->GetScreenSize ()[0];
int scr_size_y = win_->GetScreenSize ()[1];
win_->SetSize (scr_size_x / 2, scr_size_y / 2);
pcl::visualization::PCLVisualizer::spinOnce (int time, bool force_redraw)
{
resetStoppedFlag ();
- #if (defined (__APPLE__))
- if (!win_->IsDrawable ())
- {
- close ();
- return;
- }
- #endif
+
+#if VTK_MAJOR_VERSION < 9 && defined (__APPLE__)
+ if (!win_->IsDrawable ())
+ {
+ close ();
+ return;
+ }
+#endif
if (!interactor_)
return;
// If there is no custom viewport and the viewport number is not 0, exit
if (rens_->GetNumberOfItems () <= viewport)
{
- PCL_ERROR ("[removeText3D] The viewport [%d] doesn't exist (id <%s>)! ",
+ PCL_ERROR ("[removeText3D] The viewport [%d] doesn't exist (id <%s>)!\n",
viewport,
id.c_str ());
return false;
//actor->SetNumberOfCloudPoints (std::max<vtkIdType> (1, data->GetNumberOfPoints () / 10));
actor->GetProperty ()->SetInterpolationToFlat ();
}
-
/////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::visualization::PCLVisualizer::convertPointCloudToVTKPolyData (
if (!vertices)
vertices = vtkSmartPointer<vtkCellArray>::New ();
+#ifdef VTK_CELL_ARRAY_V2
+  // TODO: Remove once VTK 6, 7 and 8 are no longer supported
+ pcl::utils::ignore(initcells);
+
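+  // The VTK 9 cell array keeps no recyclable flat id buffer, so adjust the
+  // VERTS array in place: one single-point cell per cloud point.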
+ auto numOfCells = vertices->GetNumberOfCells();
+
+  // If we have fewer cells than points, add new cells.
+ if (numOfCells < nr_points)
+ {
+ for (int i = numOfCells; i < nr_points; i++)
+ {
+ vertices->InsertNextCell(1);
+ vertices->InsertCellPoint(i);
+ }
+ }
+  // If we have more cells than points, shrink the array (doesn't free the excess memory).
+ else if (numOfCells > nr_points)
+ {
+ vertices->ResizeExact(nr_points, nr_points);
+ }
+
+ polydata->SetPoints(points);
+ polydata->SetVerts(vertices);
+
+#else
vtkSmartPointer<vtkIdTypeArray> cells = vertices->GetData ();
updateCells (cells, initcells, nr_points);
// Set the cells and the vertices
vertices->SetCells (nr_points, cells);
+#endif
}
//////////////////////////////////////////////////////////////////////////////////////////////
return (style_->getCameraFile ());
}
-/////////////////////////////////////////////////////////////////////////////////////////////
-PCL_DEPRECATED(1, 12, "This method can safely not be called anymore as we're just re-rendering all scenes now.")
-void
-pcl::visualization::PCLVisualizer::updateCamera ()
-{
- rens_->InitTraversal ();
- // Update the camera parameters
- win_->Render ();
-}
-
/////////////////////////////////////////////////////////////////////////////////////////////
bool
pcl::visualization::PCLVisualizer::updateShapePose (const std::string &id, const Eigen::Affine3f& pose)
return (true);
}
+////////////////////////////////////////////////////////////////////////////////////////////
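+// Adds an ellipsoid shape posed by `transform` with semi-axes radius_x, radius_y
+// and radius_z along its local x/y/z directions; fails if a shape with the same
+// id already exists.
+// Usage sketch (assuming a PCLVisualizer instance `viewer`):
+//   viewer.addEllipsoid (Eigen::Isometry3d::Identity (), 0.3, 0.2, 0.1, "my_ellipsoid", 0);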
+bool
+pcl::visualization::PCLVisualizer::addEllipsoid (
+ const Eigen::Isometry3d &transform,
+ double radius_x, double radius_y, double radius_z,
+ const std::string &id, int viewport)
+{
+  // Check to see if this ID entry already exists (has it already been added to the visualizer?)
+ ShapeActorMap::iterator am_it = shape_actor_map_->find (id);
+ if (am_it != shape_actor_map_->end ())
+ {
+ pcl::console::print_warn (stderr, "[addEllipsoid] A shape with id <%s> already exists! Please choose a different id and retry.\n", id.c_str ());
+ return (false);
+ }
+
+ vtkSmartPointer<vtkDataSet> data = createEllipsoid (transform, radius_x, radius_y, radius_z);
+
+ // Create an Actor
+ vtkSmartPointer<vtkLODActor> actor;
+ createActorFromVTKDataSet (data, actor);
+ actor->GetProperty ()->SetRepresentationToSurface ();
+ addActorToRenderer (actor, viewport);
+
+ // Save the pointer/ID pair to the global actor map
+ (*shape_actor_map_)[id] = actor;
+ return (true);
+}
+
////////////////////////////////////////////////////////////////////////////////////////////
bool
pcl::visualization::PCLVisualizer::addSphere (const pcl::ModelCoefficients &coefficients,
// Update the cells
cells = vtkSmartPointer<vtkCellArray>::New ();
- vtkIdType *cell = cells->WritePointer (verts.size (), verts.size () * (max_size_of_polygon + 1));
- int idx = 0;
- if (!lookup.empty ())
- {
- for (std::size_t i = 0; i < verts.size (); ++i, ++idx)
- {
- std::size_t n_points = verts[i].vertices.size ();
- *cell++ = n_points;
- for (std::size_t j = 0; j < n_points; j++, cell++, ++idx)
- *cell = lookup[verts[i].vertices[j]];
- }
- }
- else
- {
- for (std::size_t i = 0; i < verts.size (); ++i, ++idx)
- {
- std::size_t n_points = verts[i].vertices.size ();
- *cell++ = n_points;
- for (std::size_t j = 0; j < n_points; j++, cell++, ++idx)
- *cell = verts[i].vertices[j];
- }
- }
+
+ const auto idx = details::fillCells(lookup, verts, cells, max_size_of_polygon);
+
cells->GetData ()->SetNumberOfValues (idx);
cells->Squeeze ();
  // Set the vertices
{
pcl::PointCloud<pcl::PointXYZRGB> cloud;
pcl::fromPCLPointCloud2 (mesh.cloud, cloud);
- if (cloud.points.empty ())
+ if (cloud.empty ())
{
PCL_ERROR ("[PCLVisualizer::addTextureMesh] Cloud is empty!\n");
return (false);
// add a texture coordinates array per texture
vtkSmartPointer<vtkFloatArray> coordinates = vtkSmartPointer<vtkFloatArray>::New ();
coordinates->SetNumberOfComponents (2);
- std::stringstream ss; ss << "TCoords" << tex_id;
- std::string this_coordinates_name = ss.str ();
+ const std::string this_coordinates_name = "TCoords" + std::to_string(tex_id);
coordinates->SetName (this_coordinates_name.c_str ());
for (std::size_t t = 0; t < mesh.tex_coordinates.size (); ++t)
//center object
double CoM[3];
- vtkIdType npts_com = 0, *ptIds_com = nullptr;
+ vtkIdType npts_com = 0;
+ vtkCellPtsPtr ptIds_com = nullptr;
vtkSmartPointer<vtkCellArray> cells_com = polydata->GetPolys ();
double center[3], p1_com[3], p2_com[3], p3_com[3], totalArea_com = 0;
// * Compute area of the mesh
//////////////////////////////
vtkSmartPointer<vtkCellArray> cells = mapper->GetInput ()->GetPolys ();
- vtkIdType npts = 0, *ptIds = nullptr;
+ vtkIdType npts = 0;
+ vtkCellPtsPtr ptIds = nullptr;
double p1[3], p2[3], p3[3], totalArea = 0;
- for (cells->InitTraversal (); cells->GetNextCell (npts, ptIds);)
+ for (cells->InitTraversal (); cells->GetNextCell(npts, ptIds);)
{
polydata->GetPoint (ptIds[0], p1);
polydata->GetPoint (ptIds[1], p2);
polydata->BuildCells ();
vtkSmartPointer<vtkCellArray> cells = polydata->GetPolys ();
- vtkIdType npts = 0, *ptIds = nullptr;
+ vtkIdType npts = 0;
+ vtkCellPtsPtr ptIds = nullptr;
double p1[3], p2[3], p3[3], area, totalArea = 0;
for (cells->InitTraversal (); cells->GetNextCell (npts, ptIds);)
vtkSmartPointer<vtkPolyData> polydata;
vtkSmartPointer<vtkIdTypeArray> initcells;
+
// Convert the PointCloud to VTK PolyData
convertPointCloudToVTKPolyData (geometry_handler, polydata, initcells);
- // use the given geometry handler
// Get the colors from the handler
bool has_colors = false;
if (!data->Resize(nr_points))
{
- PCL_ERROR("[point_cloud_handlers::getGeometry] Failed to allocate space for points in VTK array.");
+ PCL_ERROR("[point_cloud_handlers::getGeometry] Failed to allocate space for points in VTK array.\n");
throw std::bad_alloc();
}
#include <vtkPointData.h>
#include <vtkVertexGlyphFilter.h>
#include <vtkPlanes.h>
-#include <vtkXYPlotActor.h>
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
else if (eventid == vtkCommand::LeftButtonReleaseEvent)
{
style->OnLeftButtonUp ();
- std::vector<int> indices;
+ pcl::Indices indices;
int nb_points = performAreaPick (iren, indices);
AreaPickingEvent event (nb_points, indices);
style->area_picking_signal_ (event);
/////////////////////////////////////////////////////////////////////////////////////////////
int
pcl::visualization::PointPickingCallback::performAreaPick (vtkRenderWindowInteractor *iren,
- std::vector<int> &indices) const
+ pcl::Indices &indices) const
{
vtkAreaPicker *picker = static_cast<vtkAreaPicker*> (iren->GetPicker ());
vtkRenderer *ren = iren->FindPokedRenderer (iren->GetEventPosition ()[0], iren->GetEventPosition ()[1]);
indices.reserve (ids->GetNumberOfTuples ());
for(vtkIdType i = 0; i < ids->GetNumberOfTuples (); i++)
- indices.push_back (static_cast<int> (ids->GetValue (i)));
+ indices.push_back (static_cast<index_t> (ids->GetValue (i)));
return (static_cast<int> (selected->GetNumberOfPoints ()));
}